Diffstat (limited to 'tools/testing/selftests/bpf/progs')
-rw-r--r--tools/testing/selftests/bpf/progs/access_map_in_map.c93
-rw-r--r--tools/testing/selftests/bpf/progs/arena_atomics.c394
-rw-r--r--tools/testing/selftests/bpf/progs/arena_htab.c59
-rw-r--r--tools/testing/selftests/bpf/progs/arena_htab_asm.c5
-rw-r--r--tools/testing/selftests/bpf/progs/arena_list.c88
-rw-r--r--tools/testing/selftests/bpf/progs/arena_spin_lock.c51
-rw-r--r--tools/testing/selftests/bpf/progs/async_stack_depth.c61
-rw-r--r--tools/testing/selftests/bpf/progs/atomic_bounds.c24
-rw-r--r--tools/testing/selftests/bpf/progs/atomics.c170
-rw-r--r--tools/testing/selftests/bpf/progs/bad_struct_ops.c25
-rw-r--r--tools/testing/selftests/bpf/progs/bad_struct_ops2.c14
-rw-r--r--tools/testing/selftests/bpf/progs/bench_local_storage_create.c83
-rw-r--r--tools/testing/selftests/bpf/progs/bench_sockmap_prog.c65
-rw-r--r--tools/testing/selftests/bpf/progs/bind4_prog.c167
-rw-r--r--tools/testing/selftests/bpf/progs/bind6_prog.c184
-rw-r--r--tools/testing/selftests/bpf/progs/bind_perm.c45
-rw-r--r--tools/testing/selftests/bpf/progs/bind_prog.h19
-rw-r--r--tools/testing/selftests/bpf/progs/bloom_filter_bench.c154
-rw-r--r--tools/testing/selftests/bpf/progs/bloom_filter_map.c83
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h542
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_cc_cubic.c189
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_compiler.h33
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_cubic.c113
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_dctcp.c144
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_dctcp_release.c22
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_flow.c22
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c40
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c63
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter.h130
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c29
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c12
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c21
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c5
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c8
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c4
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c65
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c24
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c3
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_ksym.c71
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c22
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_netlink.c3
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c71
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c60
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task.c25
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c3
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_file.c12
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c35
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c62
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_tasks.c198
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c9
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c7
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c8
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_udp4.c3
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_udp6.c3
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_unix.c80
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c37
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_loop.c225
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_loop_bench.c30
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_misc.h245
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_mod_race.c100
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_qdisc_common.h27
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_qdisc_fail__incompl_ops.c41
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c126
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c756
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_syscall_macro.c108
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c17
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_tracing_net.h129
-rw-r--r--tools/testing/selftests/bpf/progs/bprm_opts.c34
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_signed_arr_elem_sz.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___diff.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___err_missing.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___val3_missing.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_offs.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c12
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c4
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c84
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c173
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c59
-rw-r--r--tools/testing/selftests/bpf/progs/btf_type_tag.c25
-rw-r--r--tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c73
-rw-r--r--tools/testing/selftests/bpf/progs/btf_type_tag_user.c40
-rw-r--r--tools/testing/selftests/bpf/progs/cb_refs.c114
-rw-r--r--tools/testing/selftests/bpf/progs/cg_storage_multi.h2
-rw-r--r--tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c4
-rw-r--r--tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c4
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_ancestor.c40
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c58
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_getset_retval_hooks.c16
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c69
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c156
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_iter.c38
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_mprog.c30
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_preorder.c41
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_read_xattr.c158
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c15
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c2
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_storage.c24
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_tcp_skb.c382
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h79
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c247
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c223
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c100
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_negative.c26
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c94
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c125
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c126
-rw-r--r--tools/testing/selftests/bpf/progs/compute_live_registers.c440
-rw-r--r--tools/testing/selftests/bpf/progs/connect4_dropper.c28
-rw-r--r--tools/testing/selftests/bpf/progs/connect4_prog.c15
-rw-r--r--tools/testing/selftests/bpf/progs/connect6_prog.c8
-rw-r--r--tools/testing/selftests/bpf/progs/connect_force_port4.c9
-rw-r--r--tools/testing/selftests/bpf/progs/connect_force_port6.c9
-rw-r--r--tools/testing/selftests/bpf/progs/connect_ping.c53
-rw-r--r--tools/testing/selftests/bpf/progs/connect_unix_prog.c45
-rw-r--r--tools/testing/selftests/bpf/progs/core_kern.c120
-rw-r--r--tools/testing/selftests/bpf/progs/core_kern_overflow.c22
-rw-r--r--tools/testing/selftests/bpf/progs/core_reloc_types.h260
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_common.h129
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_failure.c262
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_success.c890
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_basic.c68
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_bench.c107
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_common.h66
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_sanity.c161
-rw-r--r--tools/testing/selftests/bpf/progs/csum_diff_test.c42
-rw-r--r--tools/testing/selftests/bpf/progs/decap_sanity.c68
-rw-r--r--tools/testing/selftests/bpf/progs/dev_cgroup.c5
-rw-r--r--tools/testing/selftests/bpf/progs/dmabuf_iter.c101
-rw-r--r--tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c27
-rw-r--r--tools/testing/selftests/bpf/progs/dummy_st_ops_success.c56
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_fail.c1737
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_success.c1082
-rw-r--r--tools/testing/selftests/bpf/progs/empty_skb.c37
-rw-r--r--tools/testing/selftests/bpf/progs/epilogue_exit.c82
-rw-r--r--tools/testing/selftests/bpf/progs/epilogue_tailcall.c58
-rw-r--r--tools/testing/selftests/bpf/progs/err.h28
-rw-r--r--tools/testing/selftests/bpf/progs/exceptions.c368
-rw-r--r--tools/testing/selftests/bpf/progs/exceptions_assert.c135
-rw-r--r--tools/testing/selftests/bpf/progs/exceptions_ext.c72
-rw-r--r--tools/testing/selftests/bpf/progs/exceptions_fail.c349
-rw-r--r--tools/testing/selftests/bpf/progs/exhandler_kern.c52
-rw-r--r--tools/testing/selftests/bpf/progs/fd_htab_lookup.c25
-rw-r--r--tools/testing/selftests/bpf/progs/fentry_many_args.c39
-rw-r--r--tools/testing/selftests/bpf/progs/fentry_recursive.c14
-rw-r--r--tools/testing/selftests/bpf/progs/fentry_recursive_target.c25
-rw-r--r--tools/testing/selftests/bpf/progs/fentry_test.c2
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c10
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_many_args.c40
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_sleep.c32
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_test.c4
-rw-r--r--tools/testing/selftests/bpf/progs/fib_lookup.c22
-rw-r--r--tools/testing/selftests/bpf/progs/find_vma.c69
-rw-r--r--tools/testing/selftests/bpf/progs/find_vma_fail1.c30
-rw-r--r--tools/testing/selftests/bpf/progs/find_vma_fail2.c29
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_array_map_elem.c73
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c95
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_hash_modify.c30
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c27
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_multi_maps.c49
-rw-r--r--tools/testing/selftests/bpf/progs/free_timer.c71
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_attach_probe.c2
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_cls_redirect.c12
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_dead_global_func.c11
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_global_func.c18
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_progmap.c24
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_unreliable_prog.c20
-rw-r--r--tools/testing/selftests/bpf/progs/get_branch_snapshot.c40
-rw-r--r--tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c27
-rw-r--r--tools/testing/selftests/bpf/progs/get_func_args_test.c123
-rw-r--r--tools/testing/selftests/bpf/progs/get_func_ip_test.c105
-rw-r--r--tools/testing/selftests/bpf/progs/get_func_ip_uprobe_test.c18
-rw-r--r--tools/testing/selftests/bpf/progs/getpeername4_prog.c24
-rw-r--r--tools/testing/selftests/bpf/progs/getpeername6_prog.c31
-rw-r--r--tools/testing/selftests/bpf/progs/getpeername_unix_prog.c38
-rw-r--r--tools/testing/selftests/bpf/progs/getsockname4_prog.c24
-rw-r--r--tools/testing/selftests/bpf/progs/getsockname6_prog.c31
-rw-r--r--tools/testing/selftests/bpf/progs/getsockname_unix_prog.c38
-rw-r--r--tools/testing/selftests/bpf/progs/htab_mem_bench.c105
-rw-r--r--tools/testing/selftests/bpf/progs/htab_reuse.c19
-rw-r--r--tools/testing/selftests/bpf/progs/htab_update.c29
-rw-r--r--tools/testing/selftests/bpf/progs/ima.c103
-rw-r--r--tools/testing/selftests/bpf/progs/inner_array_lookup.c45
-rw-r--r--tools/testing/selftests/bpf/progs/ip_check_defrag.c104
-rw-r--r--tools/testing/selftests/bpf/progs/irq.c566
-rw-r--r--tools/testing/selftests/bpf/progs/iters.c1929
-rw-r--r--tools/testing/selftests/bpf/progs/iters_css.c72
-rw-r--r--tools/testing/selftests/bpf/progs/iters_css_task.c102
-rw-r--r--tools/testing/selftests/bpf/progs/iters_looping.c163
-rw-r--r--tools/testing/selftests/bpf/progs/iters_num.c242
-rw-r--r--tools/testing/selftests/bpf/progs/iters_state_safety.c426
-rw-r--r--tools/testing/selftests/bpf/progs/iters_task.c51
-rw-r--r--tools/testing/selftests/bpf/progs/iters_task_failure.c105
-rw-r--r--tools/testing/selftests/bpf/progs/iters_task_vma.c43
-rw-r--r--tools/testing/selftests/bpf/progs/iters_testmod.c125
-rw-r--r--tools/testing/selftests/bpf/progs/iters_testmod_seq.c129
-rw-r--r--tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c46
-rw-r--r--tools/testing/selftests/bpf/progs/jit_probe_mem.c59
-rw-r--r--tools/testing/selftests/bpf/progs/kfree_skb.c8
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_destructive.c13
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_fail.c160
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_race.c13
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_test.c217
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c37
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_module_order.c30
-rw-r--r--tools/testing/selftests/bpf/progs/kmem_cache_iter.c108
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi.c162
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_empty.c12
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_override.c13
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_session.c78
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c58
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c31
-rw-r--r--tools/testing/selftests/bpf/progs/kptr_xchg_inline.c48
-rw-r--r--tools/testing/selftests/bpf/progs/ksym_race.c13
-rw-r--r--tools/testing/selftests/bpf/progs/linked_funcs1.c97
-rw-r--r--tools/testing/selftests/bpf/progs/linked_funcs2.c97
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list.c420
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list.h56
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list_fail.c612
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list_peek.c113
-rw-r--r--tools/testing/selftests/bpf/progs/linked_maps1.c82
-rw-r--r--tools/testing/selftests/bpf/progs/linked_maps2.c76
-rw-r--r--tools/testing/selftests/bpf/progs/linked_vars1.c54
-rw-r--r--tools/testing/selftests/bpf/progs/linked_vars2.c55
-rw-r--r--tools/testing/selftests/bpf/progs/local_kptr_stash.c285
-rw-r--r--tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c85
-rw-r--r--tools/testing/selftests/bpf/progs/local_storage.c186
-rw-r--r--tools/testing/selftests/bpf/progs/local_storage_bench.c104
-rw-r--r--tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c67
-rw-r--r--tools/testing/selftests/bpf/progs/loop3.c4
-rw-r--r--tools/testing/selftests/bpf/progs/loop4.c4
-rw-r--r--tools/testing/selftests/bpf/progs/loop5.c1
-rw-r--r--tools/testing/selftests/bpf/progs/loop6.c102
-rw-r--r--tools/testing/selftests/bpf/progs/lru_bug.c49
-rw-r--r--tools/testing/selftests/bpf/progs/lsm.c81
-rw-r--r--tools/testing/selftests/bpf/progs/lsm_cgroup.c192
-rw-r--r--tools/testing/selftests/bpf/progs/lsm_cgroup_nonvoid.c14
-rw-r--r--tools/testing/selftests/bpf/progs/lsm_tailcall.c34
-rw-r--r--tools/testing/selftests/bpf/progs/map_in_map_btf.c73
-rw-r--r--tools/testing/selftests/bpf/progs/map_kptr.c540
-rw-r--r--tools/testing/selftests/bpf/progs/map_kptr_fail.c388
-rw-r--r--tools/testing/selftests/bpf/progs/map_percpu_stats.c24
-rw-r--r--tools/testing/selftests/bpf/progs/map_ptr_kern.c35
-rw-r--r--tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c229
-rw-r--r--tools/testing/selftests/bpf/progs/missed_kprobe.c30
-rw-r--r--tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c54
-rw-r--r--tools/testing/selftests/bpf/progs/missed_tp_recursion.c41
-rw-r--r--tools/testing/selftests/bpf/progs/mmap_inner_array.c57
-rw-r--r--tools/testing/selftests/bpf/progs/modify_return.c40
-rw-r--r--tools/testing/selftests/bpf/progs/mptcp_bpf.h42
-rw-r--r--tools/testing/selftests/bpf/progs/mptcp_sock.c88
-rw-r--r--tools/testing/selftests/bpf/progs/mptcp_subflow.c128
-rw-r--r--tools/testing/selftests/bpf/progs/mptcpify.c24
-rw-r--r--tools/testing/selftests/bpf/progs/nested_acquire.c33
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_common.h12
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_failure.c41
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_success.c42
-rw-r--r--tools/testing/selftests/bpf/progs/net_timestamping.c248
-rw-r--r--tools/testing/selftests/bpf/progs/netcnt_prog.c10
-rw-r--r--tools/testing/selftests/bpf/progs/netif_receive_skb.c15
-rw-r--r--tools/testing/selftests/bpf/progs/netns_cookie_prog.c103
-rw-r--r--tools/testing/selftests/bpf/progs/normal_map_btf.c56
-rw-r--r--tools/testing/selftests/bpf/progs/percpu_alloc_array.c190
-rw-r--r--tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c109
-rw-r--r--tools/testing/selftests/bpf/progs/percpu_alloc_fail.c182
-rw-r--r--tools/testing/selftests/bpf/progs/perf_event_stackmap.c8
-rw-r--r--tools/testing/selftests/bpf/progs/perfbuf_bench.c4
-rw-r--r--tools/testing/selftests/bpf/progs/preempt_lock.c212
-rw-r--r--tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c106
-rw-r--r--tools/testing/selftests/bpf/progs/prepare.c27
-rw-r--r--tools/testing/selftests/bpf/progs/priv_freplace_prog.c13
-rw-r--r--tools/testing/selftests/bpf/progs/priv_map.c13
-rw-r--r--tools/testing/selftests/bpf/progs/priv_prog.c13
-rw-r--r--tools/testing/selftests/bpf/progs/pro_epilogue.c154
-rw-r--r--tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c149
-rw-r--r--tools/testing/selftests/bpf/progs/pro_epilogue_with_kfunc.c88
-rw-r--r--tools/testing/selftests/bpf/progs/profiler.inc.h175
-rw-r--r--tools/testing/selftests/bpf/progs/profiler1.c1
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf.h88
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf180.c22
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf600.c11
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c6
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf600_iter.c7
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf600_nounroll.c3
-rw-r--r--tools/testing/selftests/bpf/progs/raw_tp_null.c31
-rw-r--r--tools/testing/selftests/bpf/progs/raw_tp_null_fail.c24
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree.c323
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c52
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c38
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_fail.c304
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_search.c206
-rw-r--r--tools/testing/selftests/bpf/progs/rcu_read_lock.c501
-rw-r--r--tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c36
-rw-r--r--tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c38
-rw-r--r--tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c60
-rw-r--r--tools/testing/selftests/bpf/progs/read_vsyscall.c59
-rw-r--r--tools/testing/selftests/bpf/progs/recursion.c43
-rw-r--r--tools/testing/selftests/bpf/progs/recvmsg4_prog.c40
-rw-r--r--tools/testing/selftests/bpf/progs/recvmsg6_prog.c46
-rw-r--r--tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c38
-rw-r--r--tools/testing/selftests/bpf/progs/refcounted_kptr.c571
-rw-r--r--tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c121
-rw-r--r--tools/testing/selftests/bpf/progs/res_spin_lock.c147
-rw-r--r--tools/testing/selftests/bpf/progs/res_spin_lock_fail.c244
-rw-r--r--tools/testing/selftests/bpf/progs/ringbuf_bench.c3
-rw-r--r--tools/testing/selftests/bpf/progs/sample_map_ret0.c34
-rw-r--r--tools/testing/selftests/bpf/progs/sample_ret0.c7
-rw-r--r--tools/testing/selftests/bpf/progs/security_bpf_map.c69
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg4_prog.c13
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg6_prog.c64
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c45
-rw-r--r--tools/testing/selftests/bpf/progs/set_global_vars.c106
-rw-r--r--tools/testing/selftests/bpf/progs/setget_sockopt.c434
-rw-r--r--tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c59
-rw-r--r--tools/testing/selftests/bpf/progs/skb_load_bytes.c19
-rw-r--r--tools/testing/selftests/bpf/progs/skb_pkt_end.c64
-rw-r--r--tools/testing/selftests/bpf/progs/sock_addr_kern.c65
-rw-r--r--tools/testing/selftests/bpf/progs/sock_destroy_prog.c145
-rw-r--r--tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c22
-rw-r--r--tools/testing/selftests/bpf/progs/sock_iter_batch.c133
-rw-r--r--tools/testing/selftests/bpf/progs/socket_cookie_prog.c47
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_parse_prog.c4
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c14
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c18
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_inherit.c19
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_multi.c31
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c43
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_sk.c108
-rw-r--r--tools/testing/selftests/bpf/progs/stacktrace_map_skip.c68
-rw-r--r--tools/testing/selftests/bpf/progs/stream.c79
-rw-r--r--tools/testing/selftests/bpf/progs/stream_fail.c33
-rw-r--r--tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c87
-rw-r--r--tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c23
-rw-r--r--tools/testing/selftests/bpf/progs/string_kfuncs_success.c37
-rw-r--r--tools/testing/selftests/bpf/progs/strncmp_bench.c53
-rw-r--r--tools/testing/selftests/bpf/progs/strncmp_test.c54
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta.h166
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c9
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_autocreate.c52
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c32
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_detach.c22
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c19
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c30
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__invalid_scalar.c26
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__local_kptr.c34
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__nonzero_offset.c25
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__wrong_type.c30
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c29
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c24
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_module.c90
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c102
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c22
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_private_stack.c62
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c62
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c50
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_refcounted.c31
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__global_subprog.c39
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__ref_leak.c22
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__tail_call.c36
-rw-r--r--tools/testing/selftests/bpf/progs/summarization.c78
-rw-r--r--tools/testing/selftests/bpf/progs/summarization_freplace.c33
-rw-r--r--tools/testing/selftests/bpf/progs/syscall.c208
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall1.c7
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall2.c23
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall3.c9
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall4.c9
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall5.c9
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall6.c34
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c7
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c9
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c22
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c35
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c45
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c18
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c18
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c34
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c70
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c62
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c35
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_fail.c64
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_freplace.c23
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_poke.c32
-rw-r--r--tools/testing/selftests/bpf/progs/task_kfunc_common.h77
-rw-r--r--tools/testing/selftests/bpf/progs/task_kfunc_failure.c340
-rw-r--r--tools/testing/selftests/bpf/progs/task_kfunc_success.c419
-rw-r--r--tools/testing/selftests/bpf/progs/task_local_storage.c64
-rw-r--r--tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c35
-rw-r--r--tools/testing/selftests/bpf/progs/task_ls_recursion.c90
-rw-r--r--tools/testing/selftests/bpf/progs/task_ls_uptr.c63
-rw-r--r--tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c47
-rw-r--r--tools/testing/selftests/bpf/progs/tc_bpf2bpf.c25
-rw-r--r--tools/testing/selftests/bpf/progs/tc_dummy.c12
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c29
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_kfunc.c121
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_unsupp_cong_op.c21
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_update.c74
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c65
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_rtt.c7
-rw-r--r--tools/testing/selftests/bpf/progs/test_access_variable_array.c19
-rw-r--r--tools/testing/selftests/bpf/progs/test_assign_reuse.c142
-rw-r--r--tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_attach_probe.c177
-rw-r--r--tools/testing/selftests/bpf/progs/test_attach_probe_manual.c53
-rw-r--r--tools/testing/selftests/bpf/progs/test_autoattach.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_cookie.c137
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_ma.c285
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_nf.c346
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c149
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_decl_tag.c50
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_ext.c22
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_haskv.c50
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_map_in_map.c14
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_newkv.c17
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_nokv.c14
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c100
-rw-r--r--tools/testing/selftests/bpf/progs/test_build_id.c31
-rw-r--r--tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c71
-rw-r--r--tools/testing/selftests/bpf/progs/test_cgroup_link.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_check_mtu.c290
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.c28
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.h11
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c981
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_autosize.c20
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_extern.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_read_macros.c50
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c5
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_enum64val.c70
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_existence.c11
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c21
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_mods.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_module.c104
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_size.c32
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c49
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c63
-rw-r--r--tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c28
-rw-r--r--tools/testing/selftests/bpf/progs/test_d_path_check_types.c32
-rw-r--r--tools/testing/selftests/bpf/progs/test_deny_namespace.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_enable_stats.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_fill_link_info.c61
-rw-r--r--tools/testing/selftests/bpf/progs/test_fsverity.c48
-rw-r--r--tools/testing/selftests/bpf/progs/test_get_xattr.c84
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_data.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func1.c20
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func10.c35
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func11.c21
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func12.c25
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func13.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func14.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func15.c56
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func16.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func17.c19
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func2.c47
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func3.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func4.c55
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func5.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func6.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func7.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func8.c21
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func9.c134
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func_args.c91
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c172
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_map_resize.c94
-rw-r--r--tools/testing/selftests/bpf/progs/test_hash_large_key.c44
-rw-r--r--tools/testing/selftests/bpf/progs/test_helper_restricted.c123
-rw-r--r--tools/testing/selftests/bpf/progs/test_jhash.h31
-rw-r--r--tools/testing/selftests/bpf/progs/test_kernel_flag.c28
-rw-r--r--tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c86
-rw-r--r--tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c43
-rw-r--r--tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c44
-rw-r--r--tools/testing/selftests/bpf/progs/test_ksyms_module.c50
-rw-r--r--tools/testing/selftests/bpf/progs/test_ksyms_weak.c71
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb_noinline.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c487
-rw-r--r--tools/testing/selftests/bpf/progs/test_ldsx_insn.c125
-rw-r--r--tools/testing/selftests/bpf/progs/test_legacy_printk.c73
-rw-r--r--tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c37
-rw-r--r--tools/testing/selftests/bpf/progs/test_log_buf.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_log_fixup.c74
-rw-r--r--tools/testing/selftests/bpf/progs/test_lookup_and_delete.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_lookup_key.c46
-rw-r--r--tools/testing/selftests/bpf/progs/test_lwt_redirect.c90
-rw-r--r--tools/testing/selftests/bpf/progs/test_lwt_reroute.c36
-rw-r--r--tools/testing/selftests/bpf/progs/test_lwt_seg6local.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_in_map.c39
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_init.c33
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_lock.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c76
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_ops.c138
-rw-r--r--tools/testing/selftests/bpf/progs/test_migrate_reuseport.c135
-rw-r--r--tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_mmap.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_module_attach.c142
-rw-r--r--tools/testing/selftests/bpf/progs/test_netfilter_link_attach.c14
-rw-r--r--tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c55
-rw-r--r--tools/testing/selftests/bpf/progs/test_obj_id.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c118
-rw-r--r--tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c114
-rw-r--r--tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_perf_buffer.c22
-rw-r--r--tools/testing/selftests/bpf/progs/test_perf_link.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_perf_skip.c15
-rw-r--r--tools/testing/selftests/bpf/progs/test_pinning.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_pinning_invalid.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_access.c10
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_md_access.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_probe_read_user_str.c25
-rw-r--r--tools/testing/selftests/bpf/progs/test_probe_user.c43
-rw-r--r--tools/testing/selftests/bpf/progs/test_prog_array_init.c39
-rw-r--r--tools/testing/selftests/bpf/progs/test_ptr_untrusted.c29
-rw-r--r--tools/testing/selftests/bpf/progs/test_queue_stack_map.h4
-rw-r--r--tools/testing/selftests/bpf/progs/test_rdonly_maps.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf.c7
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c71
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf_multi.c15
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf_n.c47
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf_write.c46
-rw-r--r--tools/testing/selftests/bpf/progs/test_seg6_loop.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_send_signal_kern.c37
-rw-r--r--tools/testing/selftests/bpf/progs/test_set_remove_xattr.c133
-rw-r--r--tools/testing/selftests/bpf/progs/test_sig_in_xattr.c87
-rw-r--r--tools/testing/selftests/bpf/progs/test_siphash.h64
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_assign.c22
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup.c135
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c41
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_storage_trace_itself.c29
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c111
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c47
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_ctx.c13
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c40
-rw-r--r--tools/testing/selftests/bpf/progs/test_skeleton.c41
-rw-r--r--tools/testing/selftests/bpf/progs/test_skmsg_load_helpers.c27
-rw-r--r--tools/testing/selftests/bpf/progs/test_snprintf.c77
-rw-r--r--tools/testing/selftests/bpf/progs/test_snprintf_single.c20
-rw-r--r--tools/testing/selftests/bpf/progs/test_sock_fields.c73
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c45
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c32
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_kern.h67
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_ktls.c40
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_listen.c50
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c47
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_progs_query.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_redir.c68
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_strp.c53
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_update.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_spin_lock.c72
-rw-r--r--tools/testing/selftests/bpf/progs/test_spin_lock_fail.c317
-rw-r--r--tools/testing/selftests/bpf/progs/test_stack_var_off.c51
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c17
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_map.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_static_linked1.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_static_linked2.c31
-rw-r--r--tools/testing/selftests/bpf/progs/test_subprogs.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_subprogs_extable.c51
-rw-r--r--tools/testing/selftests/bpf/progs/test_subprogs_unused.c21
-rw-r--r--tools/testing/selftests/bpf/progs/test_subskeleton.c28
-rw-r--r--tools/testing/selftests/bpf/progs/test_subskeleton_lib.c61
-rw-r--r--tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop1.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop2.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_prog.c11
-rw-r--r--tools/testing/selftests/bpf/progs/test_task_pt_regs.c36
-rw-r--r--tools/testing/selftests/bpf/progs/test_task_under_cgroup.c77
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_bpf.c25
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_change_tail.c106
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_dtime.c392
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_edt.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_link.c129
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_neigh.c37
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_peer.c56
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_tunnel.c210
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c167
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c591
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h138
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_estats.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c131
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_time_tai.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_tracepoint.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_trampoline_count.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_tunnel_kern.c787
-rw-r--r--tools/testing/selftests/bpf/progs/test_unpriv_bpf_disabled.c83
-rw-r--r--tools/testing/selftests/bpf/progs/test_uprobe.c61
-rw-r--r--tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c117
-rw-r--r--tools/testing/selftests/bpf/progs/test_urandom_usdt.c70
-rw-r--r--tools/testing/selftests/bpf/progs/test_usdt.c110
-rw-r--r--tools/testing/selftests/bpf/progs/test_usdt_multispec.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_user_ringbuf.h35
-rw-r--r--tools/testing/selftests/bpf/progs/test_varlen.c13
-rw-r--r--tools/testing/selftests/bpf/progs/test_verif_scale1.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_verif_scale2.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_verif_scale3.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c92
-rw-r--r--tools/testing/selftests/bpf/progs/test_vmlinux.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp.c7
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c36
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_attach_fail.c54
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_context_test_run.c20
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c128
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_dynptr.c256
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_link.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_loop.c7
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_meta.c57
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_noinline.c111
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_redirect.c28
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_update_frags.c42
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_vlan.c39
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_frags_helpers.c27
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c19
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_with_devmap_frags_helpers.c27
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c15
-rw-r--r--tools/testing/selftests/bpf/progs/timer.c425
-rw-r--r--tools/testing/selftests/bpf/progs/timer_crash.c54
-rw-r--r--tools/testing/selftests/bpf/progs/timer_failure.c68
-rw-r--r--tools/testing/selftests/bpf/progs/timer_lockup.c87
-rw-r--r--tools/testing/selftests/bpf/progs/timer_mim.c88
-rw-r--r--tools/testing/selftests/bpf/progs/timer_mim_reject.c74
-rw-r--r--tools/testing/selftests/bpf/progs/token_lsm.c32
-rw-r--r--tools/testing/selftests/bpf/progs/trace_dummy_st_ops.c21
-rw-r--r--tools/testing/selftests/bpf/progs/trace_printk.c7
-rw-r--r--tools/testing/selftests/bpf/progs/trace_vprintk.c34
-rw-r--r--tools/testing/selftests/bpf/progs/tracing_failure.c32
-rw-r--r--tools/testing/selftests/bpf/progs/tracing_struct.c133
-rw-r--r--tools/testing/selftests/bpf/progs/tracing_struct_many_args.c95
-rw-r--r--tools/testing/selftests/bpf/progs/trigger_bench.c122
-rw-r--r--tools/testing/selftests/bpf/progs/twfw.c58
-rw-r--r--tools/testing/selftests/bpf/progs/type_cast.c79
-rw-r--r--tools/testing/selftests/bpf/progs/udp_limit.c2
-rw-r--r--tools/testing/selftests/bpf/progs/uninit_stack.c88
-rw-r--r--tools/testing/selftests/bpf/progs/unsupported_ops.c22
-rw-r--r--tools/testing/selftests/bpf/progs/update_map_in_htab.c30
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi.c143
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_bench.c15
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c39
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c40
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_session.c71
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_session_cookie.c48
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_session_recursive.c44
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_session_single.c44
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c16
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c31
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_syscall.c15
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c17
-rw-r--r--tools/testing/selftests/bpf/progs/uptr_failure.c105
-rw-r--r--tools/testing/selftests/bpf/progs/uptr_map_failure.c27
-rw-r--r--tools/testing/selftests/bpf/progs/uptr_update_failure.c42
-rw-r--r--tools/testing/selftests/bpf/progs/uretprobe_stack.c96
-rw-r--r--tools/testing/selftests/bpf/progs/user_ringbuf_fail.c245
-rw-r--r--tools/testing/selftests/bpf/progs/user_ringbuf_success.c212
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_and.c113
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_arena.c257
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_arena_large.c273
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_array_access.c731
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_basic_stack.c100
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bitfield_write.c100
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bits_iter.c232
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bounds.c1671
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c174
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c639
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c554
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c883
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c124
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bpf_trap.c71
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bswap.c63
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c80
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_btf_unreliable_prog.c20
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_cfg.c162
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c89
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c227
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_cgroup_storage.c308
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_const.c98
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_const_or.c82
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ctx.c267
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c228
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_d_path.c48
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c803
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_direct_stack_access_wraparound.c56
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_div0.c213
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_div_overflow.c144
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c310
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_global_subprogs.c391
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_gotol.c63
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c825
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c550
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_helper_restricted.c279
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_helper_value_access.c1282
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_int_ptr.c155
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c786
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c213
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_jit_convergence.c114
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c170
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ld_ind.c110
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ldsx.c271
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_leak_ptr.c92
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_linked_scalars.c34
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_load_acquire.c234
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_loops1.c286
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_lsm.c162
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_lwt.c234
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_map_in_map.c260
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_map_ptr.c159
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c265
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_map_ret_val.c110
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_masking.c410
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_may_goto_1.c127
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_may_goto_2.c28
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_meta_access.c284
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_movsx.c354
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_mtu.c20
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c121
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c49
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_or_jmp32_k.c41
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_precision.c304
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c61
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_private_stack.c359
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_raw_stack.c372
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_raw_tp_writable.c50
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ref_tracking.c1495
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_reg_equal.c58
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_regalloc.c364
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ringbuf.c131
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_runtime_jit.c360
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_scalar_ids.c830
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_sdiv.c1224
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_search_pruning.c362
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_sock.c1096
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_sock_addr.c331
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c187
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_spill_fill.c1282
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_spin_lock.c559
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_stack_ptr.c536
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_store_release.c301
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_subprog_precision.c796
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_subreg.c673
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_tailcall.c31
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c105
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_typedef.c23
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_uninit.c61
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_unpriv.c953
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c34
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_value.c158
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_value_adj_spill.c78
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c168
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_value_or_null.c288
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c1441
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_var_off.c418
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_vfs_accept.c103
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_vfs_reject.c176
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_xadd.c124
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_xdp.c24
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c1722
-rw-r--r--tools/testing/selftests/bpf/progs/vrf_socket_lookup.c89
-rw-r--r--tools/testing/selftests/bpf/progs/wq.c189
-rw-r--r--tools/testing/selftests/bpf/progs/wq_failures.c144
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_dummy.c2
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_features.c268
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_flowtable.c148
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_hw_metadata.c117
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_metadata.c111
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_metadata2.c24
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_redirect_map.c94
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c111
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c865
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_tx.c2
-rw-r--r--tools/testing/selftests/bpf/progs/xdping_kern.c9
-rw-r--r--tools/testing/selftests/bpf/progs/xdpwall.c364
-rw-r--r--tools/testing/selftests/bpf/progs/xfrm_info.c41
-rw-r--r--tools/testing/selftests/bpf/progs/xsk_xdp_progs.c123
794 files changed, 90181 insertions, 2181 deletions
diff --git a/tools/testing/selftests/bpf/progs/access_map_in_map.c b/tools/testing/selftests/bpf/progs/access_map_in_map.c
new file mode 100644
index 000000000000..1126871c2ebd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/access_map_in_map.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <linux/bpf.h>
+#include <time.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+struct inner_map_type {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, 4);
+ __uint(value_size, 4);
+ __uint(max_entries, 1);
+} inner_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+ __array(values, struct inner_map_type);
+} outer_array_map SEC(".maps") = {
+ .values = {
+ [0] = &inner_map,
+ },
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+ __array(values, struct inner_map_type);
+} outer_htab_map SEC(".maps") = {
+ .values = {
+ [0] = &inner_map,
+ },
+};
+
+char _license[] SEC("license") = "GPL";
+
+int tgid = 0;
+
+static int acc_map_in_map(void *outer_map)
+{
+ int i, key, value = 0xdeadbeef;
+ void *inner_map;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != tgid)
+ return 0;
+
+ /* Find nonexistent inner map */
+ key = 1;
+ inner_map = bpf_map_lookup_elem(outer_map, &key);
+ if (inner_map)
+ return 0;
+
+ /* Find the old inner map */
+ key = 0;
+ inner_map = bpf_map_lookup_elem(outer_map, &key);
+ if (!inner_map)
+ return 0;
+
+ /* Wait for the old inner map to be replaced */
+ for (i = 0; i < 2048; i++)
+ bpf_map_update_elem(inner_map, &key, &value, 0);
+
+ return 0;
+}
+
+SEC("?kprobe/" SYS_PREFIX "sys_getpgid")
+int access_map_in_array(void *ctx)
+{
+ return acc_map_in_map(&outer_array_map);
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int sleepable_access_map_in_array(void *ctx)
+{
+ return acc_map_in_map(&outer_array_map);
+}
+
+SEC("?kprobe/" SYS_PREFIX "sys_getpgid")
+int access_map_in_htab(void *ctx)
+{
+ return acc_map_in_map(&outer_htab_map);
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int sleepable_access_map_in_htab(void *ctx)
+{
+ return acc_map_in_map(&outer_htab_map);
+}
diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c
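The kprobe/fentry programs above only do useful work when user space replaces the inner map of outer_array_map (or outer_htab_map) while the BPF side keeps hammering the old inner map with updates. The companion prog_tests driver is not part of this hunk; the following is a hypothetical, minimal user-space sketch of that interleaving. The skeleton header name "access_map_in_map.skel.h" and its generated accessors (access_map_in_map__open/__load/__destroy, skel->progs, skel->maps, skel->bss) are assumptions based on standard libbpf skeleton generation, not code shown in this commit.

/* Hypothetical driver sketch; not the actual prog_tests code. */
#include <pthread.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "access_map_in_map.skel.h"   /* assumed generated skeleton */

static void *trigger(void *arg)
{
	/* Each getpgid() runs the attached kprobe, which updates the
	 * current inner map 2048 times in a tight loop.
	 */
	for (int i = 0; i < 1000; i++)
		syscall(SYS_getpgid);
	return NULL;
}

int main(void)
{
	struct access_map_in_map *skel;
	pthread_t tid;
	int zero = 0, new_fd, err;

	skel = access_map_in_map__open();
	if (!skel)
		return 1;
	/* Programs use SEC("?...") and are not auto-loaded; enable one. */
	bpf_program__set_autoload(skel->progs.access_map_in_array, true);
	err = access_map_in_map__load(skel);
	if (err)
		goto out;
	skel->bss->tgid = getpid();
	if (!bpf_program__attach(skel->progs.access_map_in_array)) {
		err = 1;
		goto out;
	}

	pthread_create(&tid, NULL, trigger, NULL);

	/* Repeatedly replace the inner map at key 0. Each displaced inner
	 * map may only be freed once the running BPF program has stopped
	 * using it, which is the property this selftest exercises.
	 */
	for (int i = 0; i < 10; i++) {
		new_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL,
					sizeof(int), sizeof(int), 1, NULL);
		if (new_fd < 0)
			break;
		bpf_map_update_elem(bpf_map__fd(skel->maps.outer_array_map),
				    &zero, &new_fd, BPF_EXIST);
		close(new_fd);
	}

	pthread_join(tid, NULL);
out:
	access_map_in_map__destroy(skel);
	return err != 0;
}

The same structure would presumably cover the other three programs in this file: only the enabled program (sleepable fentry vs. kprobe) and the outer map (array-of-maps vs. hash-of-maps) change.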
new file mode 100644
index 000000000000..a52feff98112
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_atomics.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include <stdatomic.h>
+#include "bpf_arena_common.h"
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 10); /* number of pages */
+#ifdef __TARGET_ARCH_arm64
+ __ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
+#else
+ __ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
+#endif
+} arena SEC(".maps");
+
+#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+bool skip_all_tests __attribute((__section__(".data"))) = false;
+#else
+bool skip_all_tests = true;
+#endif
+
+#if defined(ENABLE_ATOMICS_TESTS) && \
+ defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
+ (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))
+bool skip_lacq_srel_tests __attribute((__section__(".data"))) = false;
+#else
+bool skip_lacq_srel_tests = true;
+#endif
+
+__u32 pid = 0;
+
+__u64 __arena_global add64_value = 1;
+__u64 __arena_global add64_result = 0;
+__u32 __arena_global add32_value = 1;
+__u32 __arena_global add32_result = 0;
+__u64 __arena_global add_stack_value_copy = 0;
+__u64 __arena_global add_stack_result = 0;
+__u64 __arena_global add_noreturn_value = 1;
+
+SEC("raw_tp/sys_enter")
+int add(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ __u64 add_stack_value = 1;
+
+ add64_result = __sync_fetch_and_add(&add64_value, 2);
+ add32_result = __sync_fetch_and_add(&add32_value, 2);
+ add_stack_result = __sync_fetch_and_add(&add_stack_value, 2);
+ add_stack_value_copy = add_stack_value;
+ __sync_fetch_and_add(&add_noreturn_value, 2);
+#endif
+
+ return 0;
+}
+
+__s64 __arena_global sub64_value = 1;
+__s64 __arena_global sub64_result = 0;
+__s32 __arena_global sub32_value = 1;
+__s32 __arena_global sub32_result = 0;
+__s64 __arena_global sub_stack_value_copy = 0;
+__s64 __arena_global sub_stack_result = 0;
+__s64 __arena_global sub_noreturn_value = 1;
+
+SEC("raw_tp/sys_enter")
+int sub(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ __u64 sub_stack_value = 1;
+
+ sub64_result = __sync_fetch_and_sub(&sub64_value, 2);
+ sub32_result = __sync_fetch_and_sub(&sub32_value, 2);
+ sub_stack_result = __sync_fetch_and_sub(&sub_stack_value, 2);
+ sub_stack_value_copy = sub_stack_value;
+ __sync_fetch_and_sub(&sub_noreturn_value, 2);
+#endif
+
+ return 0;
+}
+
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+_Atomic __u64 __arena_global and64_value = (0x110ull << 32);
+_Atomic __u32 __arena_global and32_value = 0x110;
+#else
+__u64 __arena_global and64_value = (0x110ull << 32);
+__u32 __arena_global and32_value = 0x110;
+#endif
+
+SEC("raw_tp/sys_enter")
+int and(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+ __c11_atomic_fetch_and(&and64_value, 0x011ull << 32, memory_order_relaxed);
+ __c11_atomic_fetch_and(&and32_value, 0x011, memory_order_relaxed);
+#else
+ __sync_fetch_and_and(&and64_value, 0x011ull << 32);
+ __sync_fetch_and_and(&and32_value, 0x011);
+#endif
+#endif
+
+ return 0;
+}
+
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+_Atomic __u32 __arena_global or32_value = 0x110;
+_Atomic __u64 __arena_global or64_value = (0x110ull << 32);
+#else
+__u32 __arena_global or32_value = 0x110;
+__u64 __arena_global or64_value = (0x110ull << 32);
+#endif
+
+SEC("raw_tp/sys_enter")
+int or(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+ __c11_atomic_fetch_or(&or64_value, 0x011ull << 32, memory_order_relaxed);
+ __c11_atomic_fetch_or(&or32_value, 0x011, memory_order_relaxed);
+#else
+ __sync_fetch_and_or(&or64_value, 0x011ull << 32);
+ __sync_fetch_and_or(&or32_value, 0x011);
+#endif
+#endif
+
+ return 0;
+}
+
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+_Atomic __u64 __arena_global xor64_value = (0x110ull << 32);
+_Atomic __u32 __arena_global xor32_value = 0x110;
+#else
+__u64 __arena_global xor64_value = (0x110ull << 32);
+__u32 __arena_global xor32_value = 0x110;
+#endif
+
+SEC("raw_tp/sys_enter")
+int xor(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+ __c11_atomic_fetch_xor(&xor64_value, 0x011ull << 32, memory_order_relaxed);
+ __c11_atomic_fetch_xor(&xor32_value, 0x011, memory_order_relaxed);
+#else
+ __sync_fetch_and_xor(&xor64_value, 0x011ull << 32);
+ __sync_fetch_and_xor(&xor32_value, 0x011);
+#endif
+#endif
+
+ return 0;
+}
+
+__u32 __arena_global cmpxchg32_value = 1;
+__u32 __arena_global cmpxchg32_result_fail = 0;
+__u32 __arena_global cmpxchg32_result_succeed = 0;
+__u64 __arena_global cmpxchg64_value = 1;
+__u64 __arena_global cmpxchg64_result_fail = 0;
+__u64 __arena_global cmpxchg64_result_succeed = 0;
+
+SEC("raw_tp/sys_enter")
+int cmpxchg(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ cmpxchg64_result_fail = __sync_val_compare_and_swap(&cmpxchg64_value, 0, 3);
+ cmpxchg64_result_succeed = __sync_val_compare_and_swap(&cmpxchg64_value, 1, 2);
+
+ cmpxchg32_result_fail = __sync_val_compare_and_swap(&cmpxchg32_value, 0, 3);
+ cmpxchg32_result_succeed = __sync_val_compare_and_swap(&cmpxchg32_value, 1, 2);
+#endif
+
+ return 0;
+}
+
+__u64 __arena_global xchg64_value = 1;
+__u64 __arena_global xchg64_result = 0;
+__u32 __arena_global xchg32_value = 1;
+__u32 __arena_global xchg32_result = 0;
+
+SEC("raw_tp/sys_enter")
+int xchg(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ __u64 val64 = 2;
+ __u32 val32 = 2;
+
+ xchg64_result = __sync_lock_test_and_set(&xchg64_value, val64);
+ xchg32_result = __sync_lock_test_and_set(&xchg32_value, val32);
+#endif
+
+ return 0;
+}
+
+__u64 __arena_global uaf_sink;
+volatile __u64 __arena_global uaf_recovery_fails;
+
+SEC("syscall")
+int uaf(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#if defined(ENABLE_ATOMICS_TESTS) && !defined(__TARGET_ARCH_arm64) && \
+ !defined(__TARGET_ARCH_x86)
+ __u32 __arena *page32;
+ __u64 __arena *page64;
+ void __arena *page;
+
+ page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ bpf_arena_free_pages(&arena, page, 1);
+ uaf_recovery_fails = 24;
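+	/* The page was just freed, so each of the 24 arena accesses below
+	 * is expected to fault and be recovered; every recovered access
+	 * decrements the counter, which should end up at zero.
+	 */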
+
+ page32 = (__u32 __arena *)page;
+ uaf_sink += __sync_fetch_and_add(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_add_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_sub(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_sub_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_and(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_and_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_or(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_or_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_xor(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_xor_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_val_compare_and_swap(page32, 0, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_lock_test_and_set(page32, 1);
+ uaf_recovery_fails -= 1;
+
+ page64 = (__u64 __arena *)page;
+ uaf_sink += __sync_fetch_and_add(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_add_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_sub(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_sub_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_and(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_and_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_or(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_or_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_xor(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_xor_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_val_compare_and_swap(page64, 0, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_lock_test_and_set(page64, 1);
+ uaf_recovery_fails -= 1;
+#endif
+
+ return 0;
+}
+
+#if __clang_major__ >= 18
+__u8 __arena_global load_acquire8_value = 0x12;
+__u16 __arena_global load_acquire16_value = 0x1234;
+__u32 __arena_global load_acquire32_value = 0x12345678;
+__u64 __arena_global load_acquire64_value = 0x1234567890abcdef;
+
+__u8 __arena_global load_acquire8_result = 0;
+__u16 __arena_global load_acquire16_result = 0;
+__u32 __arena_global load_acquire32_result = 0;
+__u64 __arena_global load_acquire64_result = 0;
+#else
+/* clang-17 crashes if the .addr_space.1 ELF section has holes. Work around
+ * this issue by defining the below variables as 64-bit.
+ */
+__u64 __arena_global load_acquire8_value;
+__u64 __arena_global load_acquire16_value;
+__u64 __arena_global load_acquire32_value;
+__u64 __arena_global load_acquire64_value;
+
+__u64 __arena_global load_acquire8_result;
+__u64 __arena_global load_acquire16_result;
+__u64 __arena_global load_acquire32_result;
+__u64 __arena_global load_acquire64_result;
+#endif
+
+SEC("raw_tp/sys_enter")
+int load_acquire(const void *ctx)
+{
+#if defined(ENABLE_ATOMICS_TESTS) && \
+ defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
+ (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))
+
+#define LOAD_ACQUIRE_ARENA(SIZEOP, SIZE, SRC, DST) \
+ { asm volatile ( \
+ "r1 = %[" #SRC "] ll;" \
+ "r1 = addr_space_cast(r1, 0x0, 0x1);" \
+ ".8byte %[load_acquire_insn];" \
+ "r3 = %[" #DST "] ll;" \
+ "r3 = addr_space_cast(r3, 0x0, 0x1);" \
+ "*(" #SIZE " *)(r3 + 0) = r2;" \
+ : \
+ : __imm_addr(SRC), \
+ __imm_insn(load_acquire_insn, \
+ BPF_ATOMIC_OP(BPF_##SIZEOP, BPF_LOAD_ACQ, \
+ BPF_REG_2, BPF_REG_1, 0)), \
+ __imm_addr(DST) \
+ : __clobber_all); } \
+
+ LOAD_ACQUIRE_ARENA(B, u8, load_acquire8_value, load_acquire8_result)
+ LOAD_ACQUIRE_ARENA(H, u16, load_acquire16_value,
+ load_acquire16_result)
+ LOAD_ACQUIRE_ARENA(W, u32, load_acquire32_value,
+ load_acquire32_result)
+ LOAD_ACQUIRE_ARENA(DW, u64, load_acquire64_value,
+ load_acquire64_result)
+#undef LOAD_ACQUIRE_ARENA
+
+#endif
+ return 0;
+}
+
+#if __clang_major__ >= 18
+__u8 __arena_global store_release8_result = 0;
+__u16 __arena_global store_release16_result = 0;
+__u32 __arena_global store_release32_result = 0;
+__u64 __arena_global store_release64_result = 0;
+#else
+/* clang-17 crashes if the .addr_space.1 ELF section has holes. Work around
+ * this issue by defining the below variables as 64-bit.
+ */
+__u64 __arena_global store_release8_result;
+__u64 __arena_global store_release16_result;
+__u64 __arena_global store_release32_result;
+__u64 __arena_global store_release64_result;
+#endif
+
+SEC("raw_tp/sys_enter")
+int store_release(const void *ctx)
+{
+#if defined(ENABLE_ATOMICS_TESTS) && \
+ defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
+ (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))
+
+#define STORE_RELEASE_ARENA(SIZEOP, DST, VAL) \
+ { asm volatile ( \
+ "r1 = " VAL ";" \
+ "r2 = %[" #DST "] ll;" \
+ "r2 = addr_space_cast(r2, 0x0, 0x1);" \
+ ".8byte %[store_release_insn];" \
+ : \
+ : __imm_addr(DST), \
+ __imm_insn(store_release_insn, \
+ BPF_ATOMIC_OP(BPF_##SIZEOP, BPF_STORE_REL, \
+ BPF_REG_2, BPF_REG_1, 0)) \
+ : __clobber_all); } \
+
+ STORE_RELEASE_ARENA(B, store_release8_result, "0x12")
+ STORE_RELEASE_ARENA(H, store_release16_result, "0x1234")
+ STORE_RELEASE_ARENA(W, store_release32_result, "0x12345678")
+ STORE_RELEASE_ARENA(DW, store_release64_result,
+ "0x1234567890abcdef ll")
+#undef STORE_RELEASE_ARENA
+
+#endif
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/arena_htab.c b/tools/testing/selftests/bpf/progs/arena_htab.c
new file mode 100644
index 000000000000..81eaa94afeb0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_htab.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 100); /* number of pages */
+} arena SEC(".maps");
+
+#include "bpf_arena_htab.h"
+
+void __arena *htab_for_user;
+bool skip = false;
+
+int zero = 0;
+char __arena arr1[100000];
+char arr2[1000];
+
+SEC("syscall")
+int arena_htab_llvm(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) || defined(BPF_ARENA_FORCE_ASM)
+ struct htab __arena *htab;
+ char __arena *arr = arr1;
+ __u64 i;
+
+ htab = bpf_alloc(sizeof(*htab));
+ cast_kern(htab);
+ htab_init(htab);
+
+ cast_kern(arr);
+
+ /* first run. No old elems in the table */
+ for (i = zero; i < 100000 && can_loop; i++) {
+ htab_update_elem(htab, i, i);
+ arr[i] = i;
+ }
+
+ /* should replace some elems with new ones */
+ for (i = zero; i < 1000 && can_loop; i++) {
+ htab_update_elem(htab, i, i);
+ /* Access mem to make the verifier use bounded loop logic */
+ arr2[i] = i;
+ }
+ cast_user(htab);
+ htab_for_user = htab;
+#else
+ skip = true;
+#endif
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/arena_htab_asm.c b/tools/testing/selftests/bpf/progs/arena_htab_asm.c
new file mode 100644
index 000000000000..6cd70ea12f0d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_htab_asm.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
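+/* Rebuild arena_htab.c with BPF_ARENA_FORCE_ASM, so arena pointers are
+ * handled via the inline-asm cast helpers instead of relying on
+ * __BPF_FEATURE_ADDR_SPACE_CAST, and rename the program accordingly.
+ */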
+#define BPF_ARENA_FORCE_ASM
+#define arena_htab_llvm arena_htab_asm
+#include "arena_htab.c"
diff --git a/tools/testing/selftests/bpf/progs/arena_list.c b/tools/testing/selftests/bpf/progs/arena_list.c
new file mode 100644
index 000000000000..3a2ddcacbea6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_list.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 100); /* number of pages */
+#ifdef __TARGET_ARCH_arm64
+ __ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
+#else
+ __ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
+#endif
+} arena SEC(".maps");
+
+#include "bpf_arena_alloc.h"
+#include "bpf_arena_list.h"
+
+struct elem {
+ struct arena_list_node node;
+ __u64 value;
+};
+
+struct arena_list_head __arena *list_head;
+int list_sum;
+int cnt;
+bool skip = false;
+
+#ifdef __BPF_FEATURE_ADDR_SPACE_CAST
+long __arena arena_sum;
+int __arena test_val = 1;
+struct arena_list_head __arena global_head;
+#else
+long arena_sum SEC(".addr_space.1");
+int test_val SEC(".addr_space.1");
+#endif
+
+int zero;
+
+SEC("syscall")
+int arena_list_add(void *ctx)
+{
+#ifdef __BPF_FEATURE_ADDR_SPACE_CAST
+ __u64 i;
+
+ list_head = &global_head;
+
+ for (i = zero; i < cnt && can_loop; i++) {
+ struct elem __arena *n = bpf_alloc(sizeof(*n));
+
+ test_val++;
+ n->value = i;
+ arena_sum += i;
+ list_add_head(&n->node, list_head);
+ }
+#else
+ skip = true;
+#endif
+ return 0;
+}
+
+SEC("syscall")
+int arena_list_del(void *ctx)
+{
+#ifdef __BPF_FEATURE_ADDR_SPACE_CAST
+ struct elem __arena *n;
+ int sum = 0;
+
+ arena_sum = 0;
+ list_for_each_entry(n, list_head, node) {
+ sum += n->value;
+ arena_sum += n->value;
+ list_del(&n->node);
+ bpf_free(n);
+ }
+ list_sum = sum;
+#else
+ skip = true;
+#endif
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/arena_spin_lock.c b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
new file mode 100644
index 000000000000..c4500c37f85e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_arena_spin_lock.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 100); /* number of pages */
+#ifdef __TARGET_ARCH_arm64
+ __ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
+#else
+ __ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
+#endif
+} arena SEC(".maps");
+
+int cs_count;
+
+#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+arena_spinlock_t __arena lock;
+int test_skip = 1;
+#else
+int test_skip = 2;
+#endif
+
+int counter;
+int limit;
+
+SEC("tc")
+int prog(void *ctx)
+{
+ int ret = -2;
+
+#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ unsigned long flags;
+
+ if ((ret = arena_spin_lock_irqsave(&lock, flags)))
+ return ret;
+ if (counter != limit)
+ counter++;
+ bpf_repeat(cs_count);
+ ret = 0;
+ arena_spin_unlock_irqrestore(&lock, flags);
+#endif
+ return ret;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/async_stack_depth.c b/tools/testing/selftests/bpf/progs/async_stack_depth.c
new file mode 100644
index 000000000000..36734683acbd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/async_stack_depth.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+struct hmap_elem {
+ struct bpf_timer timer;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 64);
+ __type(key, int);
+ __type(value, struct hmap_elem);
+} hmap SEC(".maps");
+
+__attribute__((noinline))
+static int timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+ volatile char buf[256] = {};
+ return buf[69];
+}
+
+__attribute__((noinline))
+static int bad_timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+ volatile char buf[300] = {};
+ return buf[255] + timer_cb(NULL, NULL, NULL);
+}
+
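+/* Both programs below must be rejected: the verifier caps the combined
+ * stack of a call chain at 512 bytes, and an async (timer) callback is
+ * checked as the root of its own call chain.
+ */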
+SEC("tc")
+__failure __msg("combined stack size of 2 calls is")
+int pseudo_call_check(struct __sk_buff *ctx)
+{
+ struct hmap_elem *elem;
+ volatile char buf[256] = {};
+
+ elem = bpf_map_lookup_elem(&hmap, &(int){0});
+ if (!elem)
+ return 0;
+
+ timer_cb(NULL, NULL, NULL);
+ return bpf_timer_set_callback(&elem->timer, timer_cb) + buf[0];
+}
+
+SEC("tc")
+__failure __msg("combined stack size of 2 calls is")
+int async_call_root_check(struct __sk_buff *ctx)
+{
+ struct hmap_elem *elem;
+ volatile char buf[256] = {};
+
+ elem = bpf_map_lookup_elem(&hmap, &(int){0});
+ if (!elem)
+ return 0;
+
+ return bpf_timer_set_callback(&elem->timer, bad_timer_cb) + buf[0];
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/atomic_bounds.c b/tools/testing/selftests/bpf/progs/atomic_bounds.c
new file mode 100644
index 000000000000..e5fff7fc7f8f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/atomic_bounds.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+
+#ifdef ENABLE_ATOMICS_TESTS
+bool skip_tests __attribute((__section__(".data"))) = false;
+#else
+bool skip_tests = true;
+#endif
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(sub, int x)
+{
+#ifdef ENABLE_ATOMICS_TESTS
+ int a = 0;
+ int b = __sync_fetch_and_add(&a, 1);
+ /* b is certainly 0 here. Can the verifier tell? */
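+	/* If the verifier tracks the bounds of the value returned by the
+	 * atomic fetch, it can prove this loop never executes; otherwise
+	 * the program would be rejected for a potentially unbounded loop.
+	 */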
+ while (b)
+ continue;
+#endif
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/atomics.c b/tools/testing/selftests/bpf/progs/atomics.c
new file mode 100644
index 000000000000..f89c7f0cc53b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/atomics.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+
+#ifdef ENABLE_ATOMICS_TESTS
+bool skip_tests __attribute((__section__(".data"))) = false;
+#else
+bool skip_tests = true;
+#endif
+
+__u32 pid = 0;
+
+__u64 add64_value = 1;
+__u64 add64_result = 0;
+__u32 add32_value = 1;
+__u32 add32_result = 0;
+__u64 add_stack_value_copy = 0;
+__u64 add_stack_result = 0;
+__u64 add_noreturn_value = 1;
+
+SEC("raw_tp/sys_enter")
+int add(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ __u64 add_stack_value = 1;
+
+ add64_result = __sync_fetch_and_add(&add64_value, 2);
+ add32_result = __sync_fetch_and_add(&add32_value, 2);
+ add_stack_result = __sync_fetch_and_add(&add_stack_value, 2);
+ add_stack_value_copy = add_stack_value;
+ __sync_fetch_and_add(&add_noreturn_value, 2);
+#endif
+
+ return 0;
+}
+
+__s64 sub64_value = 1;
+__s64 sub64_result = 0;
+__s32 sub32_value = 1;
+__s32 sub32_result = 0;
+__s64 sub_stack_value_copy = 0;
+__s64 sub_stack_result = 0;
+__s64 sub_noreturn_value = 1;
+
+SEC("raw_tp/sys_enter")
+int sub(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ __u64 sub_stack_value = 1;
+
+ sub64_result = __sync_fetch_and_sub(&sub64_value, 2);
+ sub32_result = __sync_fetch_and_sub(&sub32_value, 2);
+ sub_stack_result = __sync_fetch_and_sub(&sub_stack_value, 2);
+ sub_stack_value_copy = sub_stack_value;
+ __sync_fetch_and_sub(&sub_noreturn_value, 2);
+#endif
+
+ return 0;
+}
+
+__u64 and64_value = (0x110ull << 32);
+__u64 and64_result = 0;
+__u32 and32_value = 0x110;
+__u32 and32_result = 0;
+__u64 and_noreturn_value = (0x110ull << 32);
+
+SEC("raw_tp/sys_enter")
+int and(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+
+ and64_result = __sync_fetch_and_and(&and64_value, 0x011ull << 32);
+ and32_result = __sync_fetch_and_and(&and32_value, 0x011);
+ __sync_fetch_and_and(&and_noreturn_value, 0x011ull << 32);
+#endif
+
+ return 0;
+}
+
+__u64 or64_value = (0x110ull << 32);
+__u64 or64_result = 0;
+__u32 or32_value = 0x110;
+__u32 or32_result = 0;
+__u64 or_noreturn_value = (0x110ull << 32);
+
+SEC("raw_tp/sys_enter")
+int or(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ or64_result = __sync_fetch_and_or(&or64_value, 0x011ull << 32);
+ or32_result = __sync_fetch_and_or(&or32_value, 0x011);
+ __sync_fetch_and_or(&or_noreturn_value, 0x011ull << 32);
+#endif
+
+ return 0;
+}
+
+__u64 xor64_value = (0x110ull << 32);
+__u64 xor64_result = 0;
+__u32 xor32_value = 0x110;
+__u32 xor32_result = 0;
+__u64 xor_noreturn_value = (0x110ull << 32);
+
+SEC("raw_tp/sys_enter")
+int xor(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ xor64_result = __sync_fetch_and_xor(&xor64_value, 0x011ull << 32);
+ xor32_result = __sync_fetch_and_xor(&xor32_value, 0x011);
+ __sync_fetch_and_xor(&xor_noreturn_value, 0x011ull << 32);
+#endif
+
+ return 0;
+}
+
+__u64 cmpxchg64_value = 1;
+__u64 cmpxchg64_result_fail = 0;
+__u64 cmpxchg64_result_succeed = 0;
+__u32 cmpxchg32_value = 1;
+__u32 cmpxchg32_result_fail = 0;
+__u32 cmpxchg32_result_succeed = 0;
+
+SEC("raw_tp/sys_enter")
+int cmpxchg(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ cmpxchg64_result_fail = __sync_val_compare_and_swap(&cmpxchg64_value, 0, 3);
+ cmpxchg64_result_succeed = __sync_val_compare_and_swap(&cmpxchg64_value, 1, 2);
+
+ cmpxchg32_result_fail = __sync_val_compare_and_swap(&cmpxchg32_value, 0, 3);
+ cmpxchg32_result_succeed = __sync_val_compare_and_swap(&cmpxchg32_value, 1, 2);
+#endif
+
+ return 0;
+}
+
+__u64 xchg64_value = 1;
+__u64 xchg64_result = 0;
+__u32 xchg32_value = 1;
+__u32 xchg32_result = 0;
+
+SEC("raw_tp/sys_enter")
+int xchg(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ __u64 val64 = 2;
+ __u32 val32 = 2;
+
+ xchg64_result = __sync_lock_test_and_set(&xchg64_value, val64);
+ xchg32_result = __sync_lock_test_and_set(&xchg32_value, val32);
+#endif
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bad_struct_ops.c b/tools/testing/selftests/bpf/progs/bad_struct_ops.c
new file mode 100644
index 000000000000..b3f77b4561c8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bad_struct_ops.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1) { return 0; }
+
+SEC("struct_ops/test_2")
+int BPF_PROG(test_2) { return 0; }
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops2 testmod_2 = {
+ .test_1 = (void *)test_1
+};
diff --git a/tools/testing/selftests/bpf/progs/bad_struct_ops2.c b/tools/testing/selftests/bpf/progs/bad_struct_ops2.c
new file mode 100644
index 000000000000..64a95f6be86d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bad_struct_ops2.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* This is an unused struct_ops program: it lacks a corresponding
+ * struct_ops map, which would provide attachment information.
+ * Without additional configuration, an attempt to load such a
+ * BPF object file would fail.
+ */
+SEC("struct_ops/foo")
+void foo(void) {}
diff --git a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
new file mode 100644
index 000000000000..c8ec0d0368e4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+long create_errs = 0;
+long create_cnts = 0;
+long kmalloc_cnts = 0;
+__u32 bench_pid = 0;
+
+struct storage {
+ __u8 data[64];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct storage);
+} sk_storage_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct storage);
+} task_storage_map SEC(".maps");
+
+SEC("raw_tp/kmalloc")
+int BPF_PROG(kmalloc, unsigned long call_site, const void *ptr,
+ size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags,
+ int node)
+{
+ __sync_fetch_and_add(&kmalloc_cnts, 1);
+
+ return 0;
+}
+
+SEC("tp_btf/sched_process_fork")
+int BPF_PROG(sched_process_fork, struct task_struct *parent, struct task_struct *child)
+{
+ struct storage *stg;
+
+ if (parent->tgid != bench_pid)
+ return 0;
+
+ stg = bpf_task_storage_get(&task_storage_map, child, NULL,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (stg)
+ __sync_fetch_and_add(&create_cnts, 1);
+ else
+ __sync_fetch_and_add(&create_errs, 1);
+
+ return 0;
+}
+
+SEC("lsm.s/socket_post_create")
+int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
+ int protocol, int kern)
+{
+ struct sock *sk = sock->sk;
+ struct storage *stg;
+ __u32 pid;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != bench_pid || !sk)
+ return 0;
+
+ stg = bpf_sk_storage_get(&sk_storage_map, sk, NULL,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+
+ if (stg)
+ __sync_fetch_and_add(&create_cnts, 1);
+ else
+ __sync_fetch_and_add(&create_errs, 1);
+
+ return 0;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bench_sockmap_prog.c b/tools/testing/selftests/bpf/progs/bench_sockmap_prog.c
new file mode 100644
index 000000000000..079bf3794b3a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bench_sockmap_prog.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+long process_byte = 0;
+int verdict_dir = 0;
+int dropped = 0;
+int pkt_size = 0;
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_rx SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_tx SEC(".maps");
+
+SEC("sk_skb/stream_parser")
+int prog_skb_parser(struct __sk_buff *skb)
+{
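+	/* A stream parser returns the length of the next record; pkt_size
+	 * is presumably configured by the user-space benchmark before the
+	 * run.
+	 */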
+ return pkt_size;
+}
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ int one = 1;
+ int ret = bpf_sk_redirect_map(skb, &sock_map_rx, one, verdict_dir);
+
+ if (ret == SK_DROP)
+ dropped++;
+ __sync_fetch_and_add(&process_byte, skb->len);
+ return ret;
+}
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_pass(struct __sk_buff *skb)
+{
+ __sync_fetch_and_add(&process_byte, skb->len);
+ return SK_PASS;
+}
+
+SEC("sk_msg")
+int prog_skmsg_verdict(struct sk_msg_md *msg)
+{
+ int one = 1;
+
+ __sync_fetch_and_add(&process_byte, msg->size);
+ return bpf_msg_redirect_map(msg, &sock_map_tx, one, verdict_dir);
+}
+
+SEC("sk_msg")
+int prog_skmsg_pass(struct sk_msg_md *msg)
+{
+ __sync_fetch_and_add(&process_byte, msg->size);
+ return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bind4_prog.c b/tools/testing/selftests/bpf/progs/bind4_prog.c
new file mode 100644
index 000000000000..b7ddf8ec4ee8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bind4_prog.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <string.h>
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/if.h>
+#include <errno.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include "bind_prog.h"
+
+#define SERV4_IP 0xc0a801feU /* 192.168.1.254 */
+#define SERV4_PORT 4040
+#define SERV4_REWRITE_IP 0x7f000001U /* 127.0.0.1 */
+#define SERV4_REWRITE_PORT 4444
+
+#ifndef IFNAMSIZ
+#define IFNAMSIZ 16
+#endif
+
+static __inline int bind_to_device(struct bpf_sock_addr *ctx)
+{
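+	/* Bind to two existing devices by name and check that the reported
+	 * ifindex changes, verify that a nonexistent device fails with
+	 * -ENODEV, then bind by ifindex and finally unbind via an empty
+	 * name.
+	 */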
+ char veth1[IFNAMSIZ] = "test_sock_addr1";
+ char veth2[IFNAMSIZ] = "test_sock_addr2";
+ char missing[IFNAMSIZ] = "nonexistent_dev";
+ char del_bind[IFNAMSIZ] = "";
+ int veth1_idx, veth2_idx;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
+ &veth1, sizeof(veth1)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &veth1_idx, sizeof(veth1_idx)) || !veth1_idx)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
+ &veth2, sizeof(veth2)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &veth2_idx, sizeof(veth2_idx)) || !veth2_idx ||
+ veth1_idx == veth2_idx)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
+ &missing, sizeof(missing)) != -ENODEV)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &veth1_idx, sizeof(veth1_idx)))
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
+ &del_bind, sizeof(del_bind)))
+ return 1;
+
+ return 0;
+}
+
+static __inline int bind_reuseport(struct bpf_sock_addr *ctx)
+{
+ int val = 1;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)) || !val)
+ return 1;
+ val = 0;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)) || val)
+ return 1;
+
+ return 0;
+}
+
+static __inline int misc_opts(struct bpf_sock_addr *ctx, int opt)
+{
+ int old, tmp, new = 0xeb9f;
+
+	/* The socket in the test case guarantees that old never equals new. */
+ if (bpf_getsockopt(ctx, SOL_SOCKET, opt, &old, sizeof(old)) ||
+ old == new)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, opt, &new, sizeof(new)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, opt, &tmp, sizeof(tmp)) ||
+ tmp != new)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, opt, &old, sizeof(old)))
+ return 1;
+
+ return 0;
+}
+
+SEC("cgroup/bind4")
+int bind_v4_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock *sk;
+ __u32 user_ip4;
+ __u16 user_port;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 0;
+
+ if (sk->family != AF_INET)
+ return 0;
+
+ if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
+ return 0;
+
+ if (ctx->user_ip4 != bpf_htonl(SERV4_IP) ||
+ ctx->user_port != bpf_htons(SERV4_PORT))
+ return 0;
+
+ // u8 narrow loads:
+ user_ip4 = 0;
+ user_ip4 |= load_byte(ctx->user_ip4, 0, sizeof(user_ip4));
+ user_ip4 |= load_byte(ctx->user_ip4, 1, sizeof(user_ip4));
+ user_ip4 |= load_byte(ctx->user_ip4, 2, sizeof(user_ip4));
+ user_ip4 |= load_byte(ctx->user_ip4, 3, sizeof(user_ip4));
+ if (ctx->user_ip4 != user_ip4)
+ return 0;
+
+ user_port = 0;
+ user_port |= load_byte(ctx->user_port, 0, sizeof(user_port));
+ user_port |= load_byte(ctx->user_port, 1, sizeof(user_port));
+ if (ctx->user_port != user_port)
+ return 0;
+
+ // u16 narrow loads:
+ user_ip4 = 0;
+ user_ip4 |= load_word(ctx->user_ip4, 0, sizeof(user_ip4));
+ user_ip4 |= load_word(ctx->user_ip4, 1, sizeof(user_ip4));
+ if (ctx->user_ip4 != user_ip4)
+ return 0;
+
+ /* Bind to device and unbind it. */
+ if (bind_to_device(ctx))
+ return 0;
+
+ /* Test for misc socket options. */
+ if (misc_opts(ctx, SO_MARK) || misc_opts(ctx, SO_PRIORITY))
+ return 0;
+
+ /* Set reuseport and unset */
+ if (bind_reuseport(ctx))
+ return 0;
+
+ ctx->user_ip4 = bpf_htonl(SERV4_REWRITE_IP);
+ ctx->user_port = bpf_htons(SERV4_REWRITE_PORT);
+
+ return 1;
+}
+
+SEC("cgroup/bind4")
+int bind_v4_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bind6_prog.c b/tools/testing/selftests/bpf/progs/bind6_prog.c
new file mode 100644
index 000000000000..501c3fc11d35
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bind6_prog.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <string.h>
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/if.h>
+#include <errno.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include "bind_prog.h"
+
+#define SERV6_IP_0 0xfaceb00c /* face:b00c:1234:5678::abcd */
+#define SERV6_IP_1 0x12345678
+#define SERV6_IP_2 0x00000000
+#define SERV6_IP_3 0x0000abcd
+#define SERV6_PORT 6060
+#define SERV6_REWRITE_IP_0 0x00000000
+#define SERV6_REWRITE_IP_1 0x00000000
+#define SERV6_REWRITE_IP_2 0x00000000
+#define SERV6_REWRITE_IP_3 0x00000001
+#define SERV6_REWRITE_PORT 6666
+
+#ifndef IFNAMSIZ
+#define IFNAMSIZ 16
+#endif
+
+static __inline int bind_to_device(struct bpf_sock_addr *ctx)
+{
+ char veth1[IFNAMSIZ] = "test_sock_addr1";
+ char veth2[IFNAMSIZ] = "test_sock_addr2";
+ char missing[IFNAMSIZ] = "nonexistent_dev";
+ char del_bind[IFNAMSIZ] = "";
+ int veth1_idx, veth2_idx;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
+ &veth1, sizeof(veth1)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &veth1_idx, sizeof(veth1_idx)) || !veth1_idx)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
+ &veth2, sizeof(veth2)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &veth2_idx, sizeof(veth2_idx)) || !veth2_idx ||
+ veth1_idx == veth2_idx)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
+ &missing, sizeof(missing)) != -ENODEV)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &veth1_idx, sizeof(veth1_idx)))
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
+ &del_bind, sizeof(del_bind)))
+ return 1;
+
+ return 0;
+}
+
+static __inline int bind_reuseport(struct bpf_sock_addr *ctx)
+{
+ int val = 1;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)) || !val)
+ return 1;
+ val = 0;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)) || val)
+ return 1;
+
+ return 0;
+}
+
+static __inline int misc_opts(struct bpf_sock_addr *ctx, int opt)
+{
+ int old, tmp, new = 0xeb9f;
+
+	/* The socket in the test case guarantees that old never equals new. */
+ if (bpf_getsockopt(ctx, SOL_SOCKET, opt, &old, sizeof(old)) ||
+ old == new)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, opt, &new, sizeof(new)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, opt, &tmp, sizeof(tmp)) ||
+ tmp != new)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, opt, &old, sizeof(old)))
+ return 1;
+
+ return 0;
+}
+
+SEC("cgroup/bind6")
+int bind_v6_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock *sk;
+ __u32 user_ip6;
+ __u16 user_port;
+ int i;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 0;
+
+ if (sk->family != AF_INET6)
+ return 0;
+
+ if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
+ return 0;
+
+ if (ctx->user_ip6[0] != bpf_htonl(SERV6_IP_0) ||
+ ctx->user_ip6[1] != bpf_htonl(SERV6_IP_1) ||
+ ctx->user_ip6[2] != bpf_htonl(SERV6_IP_2) ||
+ ctx->user_ip6[3] != bpf_htonl(SERV6_IP_3) ||
+ ctx->user_port != bpf_htons(SERV6_PORT))
+ return 0;
+
+ // u8 narrow loads:
+ for (i = 0; i < 4; i++) {
+ user_ip6 = 0;
+ user_ip6 |= load_byte(ctx->user_ip6[i], 0, sizeof(user_ip6));
+ user_ip6 |= load_byte(ctx->user_ip6[i], 1, sizeof(user_ip6));
+ user_ip6 |= load_byte(ctx->user_ip6[i], 2, sizeof(user_ip6));
+ user_ip6 |= load_byte(ctx->user_ip6[i], 3, sizeof(user_ip6));
+ if (ctx->user_ip6[i] != user_ip6)
+ return 0;
+ }
+
+ user_port = 0;
+ user_port |= load_byte(ctx->user_port, 0, sizeof(user_port));
+ user_port |= load_byte(ctx->user_port, 1, sizeof(user_port));
+ if (ctx->user_port != user_port)
+ return 0;
+
+ // u16 narrow loads:
+ for (i = 0; i < 4; i++) {
+ user_ip6 = 0;
+ user_ip6 |= load_word(ctx->user_ip6[i], 0, sizeof(user_ip6));
+ user_ip6 |= load_word(ctx->user_ip6[i], 1, sizeof(user_ip6));
+ if (ctx->user_ip6[i] != user_ip6)
+ return 0;
+ }
+
+ /* Bind to device and unbind it. */
+ if (bind_to_device(ctx))
+ return 0;
+
+ /* Test for misc socket options. */
+ if (misc_opts(ctx, SO_MARK) || misc_opts(ctx, SO_PRIORITY))
+ return 0;
+
+ /* Set reuseport and unset */
+ if (bind_reuseport(ctx))
+ return 0;
+
+ ctx->user_ip6[0] = bpf_htonl(SERV6_REWRITE_IP_0);
+ ctx->user_ip6[1] = bpf_htonl(SERV6_REWRITE_IP_1);
+ ctx->user_ip6[2] = bpf_htonl(SERV6_REWRITE_IP_2);
+ ctx->user_ip6[3] = bpf_htonl(SERV6_REWRITE_IP_3);
+ ctx->user_port = bpf_htons(SERV6_REWRITE_PORT);
+
+ return 1;
+}
+
+SEC("cgroup/bind6")
+int bind_v6_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bind_perm.c b/tools/testing/selftests/bpf/progs/bind_perm.c
new file mode 100644
index 000000000000..7bd2a027025d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bind_perm.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+static __always_inline int bind_prog(struct bpf_sock_addr *ctx, int family)
+{
+ struct bpf_sock *sk;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 0;
+
+ if (sk->family != family)
+ return 0;
+
+ if (ctx->type != SOCK_STREAM)
+ return 0;
+
+	/* Return the allow verdict (1) with the next bit also set (1 | 2)
+	 * to indicate that CAP_NET_BIND_SERVICE should be bypassed.
+ */
+ if (ctx->user_port == bpf_htons(111))
+ return (1 | 2);
+
+ return 1;
+}
+
+SEC("cgroup/bind4")
+int bind_v4_prog(struct bpf_sock_addr *ctx)
+{
+ return bind_prog(ctx, AF_INET);
+}
+
+SEC("cgroup/bind6")
+int bind_v6_prog(struct bpf_sock_addr *ctx)
+{
+ return bind_prog(ctx, AF_INET6);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bind_prog.h b/tools/testing/selftests/bpf/progs/bind_prog.h
new file mode 100644
index 000000000000..e830caa940c3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bind_prog.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BIND_PROG_H__
+#define __BIND_PROG_H__
+
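+/* Helpers for exercising narrow (u8/u16) loads of context fields:
+ * load_byte()/load_word() read element 'b'/'w' of 'src' and shift it back
+ * into place, so OR-ing all pieces should reconstruct the original value
+ * on both little- and big-endian hosts.
+ */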
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define load_byte(src, b, s) \
+ (((volatile __u8 *)&(src))[b] << 8 * b)
+#define load_word(src, w, s) \
+ (((volatile __u16 *)&(src))[w] << 16 * w)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define load_byte(src, b, s) \
+ (((volatile __u8 *)&(src))[(b) + (sizeof(src) - (s))] << 8 * ((s) - (b) - 1))
+#define load_word(src, w, s) \
+ (((volatile __u16 *)&(src))[w] << 16 * (((s) / 2) - (w) - 1))
+#else
+# error "Fix your compiler's __BYTE_ORDER__?!"
+#endif
+
+#endif
diff --git a/tools/testing/selftests/bpf/progs/bloom_filter_bench.c b/tools/testing/selftests/bpf/progs/bloom_filter_bench.c
new file mode 100644
index 000000000000..7efcbdbe772d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bloom_filter_bench.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <errno.h>
+#include <linux/bpf.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct bpf_map;
+
+__u8 rand_vals[2500000];
+const __u32 nr_rand_bytes = 2500000;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ /* max entries and value_size will be set programmatically.
+ * They are configurable from the userspace bench program.
+ */
+} array_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
+ /* max entries, value_size, and # of hash functions will be set
+ * programmatically. They are configurable from the userspace
+ * bench program.
+ */
+ __uint(map_extra, 3);
+} bloom_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+	/* max entries, key_size, and value_size will be set
+ * programmatically. They are configurable from the userspace
+ * bench program.
+ */
+} hashmap SEC(".maps");
+
+struct callback_ctx {
+ struct bpf_map *map;
+ bool update;
+};
+
+/* Tracks the number of hits, drops, and false hits */
+struct {
+ __u32 stats[3];
+} __attribute__((__aligned__(256))) percpu_stats[256];
+
+const __u32 hit_key = 0;
+const __u32 drop_key = 1;
+const __u32 false_hit_key = 2;
+
+__u8 value_size;
+
+const volatile bool hashmap_use_bloom;
+const volatile bool count_false_hits;
+
+int error = 0;
+
+static __always_inline void log_result(__u32 key)
+{
+ __u32 cpu = bpf_get_smp_processor_id();
+
+ percpu_stats[cpu & 255].stats[key]++;
+}
+
+static __u64
+bloom_callback(struct bpf_map *map, __u32 *key, void *val,
+ struct callback_ctx *data)
+{
+ int err;
+
+ if (data->update)
+ err = bpf_map_push_elem(data->map, val, 0);
+ else
+ err = bpf_map_peek_elem(data->map, val);
+
+ if (err) {
+ error |= 1;
+ return 1; /* stop the iteration */
+ }
+
+ log_result(hit_key);
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int bloom_lookup(void *ctx)
+{
+ struct callback_ctx data;
+
+ data.map = (struct bpf_map *)&bloom_map;
+ data.update = false;
+
+ bpf_for_each_map_elem(&array_map, bloom_callback, &data, 0);
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int bloom_update(void *ctx)
+{
+ struct callback_ctx data;
+
+ data.map = (struct bpf_map *)&bloom_map;
+ data.update = true;
+
+ bpf_for_each_map_elem(&array_map, bloom_callback, &data, 0);
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int bloom_hashmap_lookup(void *ctx)
+{
+ __u64 *result;
+ int i, err;
+
+ __u32 index = bpf_get_prandom_u32();
+ __u32 bitmask = (1ULL << 21) - 1;
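+	/* Masking keeps index (plus up to value_size bytes) within the
+	 * 2.5MB rand_vals buffer.
+	 */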
+
+ for (i = 0; i < 1024; i++, index += value_size) {
+ index = index & bitmask;
+
+ if (hashmap_use_bloom) {
+ err = bpf_map_peek_elem(&bloom_map,
+ rand_vals + index);
+ if (err) {
+ if (err != -ENOENT) {
+ error |= 2;
+ return 0;
+ }
+ log_result(hit_key);
+ continue;
+ }
+ }
+
+ result = bpf_map_lookup_elem(&hashmap,
+ rand_vals + index);
+ if (result) {
+ log_result(hit_key);
+ } else {
+ if (hashmap_use_bloom && count_false_hits)
+ log_result(false_hit_key);
+ log_result(drop_key);
+ }
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bloom_filter_map.c b/tools/testing/selftests/bpf/progs/bloom_filter_map.c
new file mode 100644
index 000000000000..f245fcfe0c61
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bloom_filter_map.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct bpf_map;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1000);
+} map_random_data SEC(".maps");
+
+struct map_bloom_type {
+ __uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
+ __type(value, __u32);
+ __uint(max_entries, 10000);
+ __uint(map_extra, 5);
+} map_bloom SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+ __array(values, struct map_bloom_type);
+} outer_map SEC(".maps");
+
+struct callback_ctx {
+ struct bpf_map *map;
+};
+
+int error = 0;
+
+static __u64
+check_elem(struct bpf_map *map, __u32 *key, __u32 *val,
+ struct callback_ctx *data)
+{
+ int err;
+
+ err = bpf_map_peek_elem(data->map, val);
+ if (err) {
+ error |= 1;
+ return 1; /* stop the iteration */
+ }
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int inner_map(void *ctx)
+{
+ struct bpf_map *inner_map;
+ struct callback_ctx data;
+ int key = 0;
+
+ inner_map = bpf_map_lookup_elem(&outer_map, &key);
+ if (!inner_map) {
+ error |= 2;
+ return 0;
+ }
+
+ data.map = inner_map;
+ bpf_for_each_map_elem(&map_random_data, check_elem, &data, 0);
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int check_bloom(void *ctx)
+{
+ struct callback_ctx data;
+
+ data.map = (struct bpf_map *)&map_bloom;
+ bpf_for_each_map_elem(&map_random_data, check_elem, &data, 0);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h b/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
new file mode 100644
index 000000000000..d67466c1ff77
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#ifndef BPF_ARENA_SPIN_LOCK_H
+#define BPF_ARENA_SPIN_LOCK_H
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_atomic.h"
+
+#define arch_mcs_spin_lock_contended_label(l, label) smp_cond_load_acquire_label(l, VAL, label)
+#define arch_mcs_spin_unlock_contended(l) smp_store_release((l), 1)
+
+#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+
+#define EBUSY 16
+#define EOPNOTSUPP 95
+#define ETIMEDOUT 110
+
+#ifndef __arena
+#define __arena __attribute__((address_space(1)))
+#endif
+
+extern unsigned long CONFIG_NR_CPUS __kconfig;
+
+/*
+ * Typically, we'd just rely on the definition in vmlinux.h for qspinlock, but
+ * PowerPC overrides the definition to define lock->val as u32 instead of
+ * atomic_t, leading to compilation errors. Import a local definition below so
+ * that we don't depend on the vmlinux.h version.
+ */
+
+struct __qspinlock {
+ union {
+ atomic_t val;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ struct {
+ u8 locked;
+ u8 pending;
+ };
+ struct {
+ u16 locked_pending;
+ u16 tail;
+ };
+#else
+ struct {
+ u16 tail;
+ u16 locked_pending;
+ };
+ struct {
+ u8 reserved[2];
+ u8 pending;
+ u8 locked;
+ };
+#endif
+ };
+};
+
+#define arena_spinlock_t struct __qspinlock
+/* FIXME: Using typedef causes CO-RE relocation error */
+/* typedef struct qspinlock arena_spinlock_t; */
+
+struct arena_mcs_spinlock {
+ struct arena_mcs_spinlock __arena *next;
+ int locked;
+ int count;
+};
+
+struct arena_qnode {
+ struct arena_mcs_spinlock mcs;
+};
+
+#define _Q_MAX_NODES 4
+#define _Q_PENDING_LOOPS 1
+
+/*
+ * Bitfields in the atomic value:
+ *
+ * 0- 7: locked byte
+ * 8: pending
+ * 9-15: not used
+ * 16-17: tail index
+ * 18-31: tail cpu (+1)
+ */
+#define _Q_MAX_CPUS 1024
+
+#define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\
+ << _Q_ ## type ## _OFFSET)
+#define _Q_LOCKED_OFFSET 0
+#define _Q_LOCKED_BITS 8
+#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED)
+
+#define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
+#define _Q_PENDING_BITS 8
+#define _Q_PENDING_MASK _Q_SET_MASK(PENDING)
+
+#define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
+#define _Q_TAIL_IDX_BITS 2
+#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX)
+
+#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
+#define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET)
+#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU)
+
+#define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET
+#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
+#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)
+#define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET)
+
+struct arena_qnode __arena qnodes[_Q_MAX_CPUS][_Q_MAX_NODES];
+
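+/* For example, encode_tail(cpu = 2, idx = 1) yields (3 << 18) | (1 << 16):
+ * "tail cpu (+1)" in bits 18-31 and "tail idx" in bits 16-17, matching the
+ * bitfield layout described above.
+ */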
+static inline u32 encode_tail(int cpu, int idx)
+{
+ u32 tail;
+
+ tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
+ tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */
+
+ return tail;
+}
+
+static inline struct arena_mcs_spinlock __arena *decode_tail(u32 tail)
+{
+ u32 cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
+ u32 idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
+
+ return &qnodes[cpu][idx].mcs;
+}
+
+static inline
+struct arena_mcs_spinlock __arena *grab_mcs_node(struct arena_mcs_spinlock __arena *base, int idx)
+{
+ return &((struct arena_qnode __arena *)base + idx)->mcs;
+}
+
+#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
+
+/**
+ * xchg_tail - Put in the new queue tail code word & retrieve previous one
+ * @lock : Pointer to queued spinlock structure
+ * @tail : The new queue tail code word
+ * Return: The previous queue tail code word
+ *
+ * xchg(lock, tail)
+ *
+ * p,*,* -> n,*,* ; prev = xchg(lock, node)
+ */
+static __always_inline u32 xchg_tail(arena_spinlock_t __arena *lock, u32 tail)
+{
+ u32 old, new;
+
+ old = atomic_read(&lock->val);
+ do {
+ new = (old & _Q_LOCKED_PENDING_MASK) | tail;
+ /*
+ * We can use relaxed semantics since the caller ensures that
+ * the MCS node is properly initialized before updating the
+ * tail.
+ */
+ /* These loops are not expected to stall, but we still need to
+ * prove to the verifier they will terminate eventually.
+ */
+ cond_break_label(out);
+ } while (!atomic_try_cmpxchg_relaxed(&lock->val, &old, new));
+
+ return old;
+out:
+ bpf_printk("RUNTIME ERROR: %s unexpected cond_break exit!!!", __func__);
+ return old;
+}
+
+/**
+ * clear_pending - clear the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,1,* -> *,0,*
+ */
+static __always_inline void clear_pending(arena_spinlock_t __arena *lock)
+{
+ WRITE_ONCE(lock->pending, 0);
+}
+
+/**
+ * clear_pending_set_locked - take ownership and clear the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,1,0 -> *,0,1
+ *
+ * Lock stealing is not allowed if this function is used.
+ */
+static __always_inline void clear_pending_set_locked(arena_spinlock_t __arena *lock)
+{
+ WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
+}
+
+/**
+ * set_locked - Set the lock bit and own the lock
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,*,0 -> *,0,1
+ */
+static __always_inline void set_locked(arena_spinlock_t __arena *lock)
+{
+ WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
+}
+
+static __always_inline
+u32 arena_fetch_set_pending_acquire(arena_spinlock_t __arena *lock)
+{
+ u32 old, new;
+
+ old = atomic_read(&lock->val);
+ do {
+ new = old | _Q_PENDING_VAL;
+ /*
+ * These loops are not expected to stall, but we still need to
+ * prove to the verifier they will terminate eventually.
+ */
+ cond_break_label(out);
+ } while (!atomic_try_cmpxchg_acquire(&lock->val, &old, new));
+
+ return old;
+out:
+ bpf_printk("RUNTIME ERROR: %s unexpected cond_break exit!!!", __func__);
+ return old;
+}
+
+/**
+ * arena_spin_trylock - try to acquire the queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int arena_spin_trylock(arena_spinlock_t __arena *lock)
+{
+ int val = atomic_read(&lock->val);
+
+ if (unlikely(val))
+ return 0;
+
+ return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
+}
+
+__noinline
+int arena_spin_lock_slowpath(arena_spinlock_t __arena __arg_arena *lock, u32 val)
+{
+ struct arena_mcs_spinlock __arena *prev, *next, *node0, *node;
+ int ret = -ETIMEDOUT;
+ u32 old, tail;
+ int idx;
+
+ /*
+ * Wait for in-progress pending->locked hand-overs with a bounded
+ * number of spins so that we guarantee forward progress.
+ *
+ * 0,1,0 -> 0,0,1
+ */
+ if (val == _Q_PENDING_VAL) {
+ int cnt = _Q_PENDING_LOOPS;
+ val = atomic_cond_read_relaxed_label(&lock->val,
+ (VAL != _Q_PENDING_VAL) || !cnt--,
+ release_err);
+ }
+
+ /*
+ * If we observe any contention; queue.
+ */
+ if (val & ~_Q_LOCKED_MASK)
+ goto queue;
+
+ /*
+ * trylock || pending
+ *
+ * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock
+ */
+ val = arena_fetch_set_pending_acquire(lock);
+
+ /*
+ * If we observe contention, there is a concurrent locker.
+ *
+ * Undo and queue; our setting of PENDING might have made the
+ * n,0,0 -> 0,0,0 transition fail and it will now be waiting
+ * on @next to become !NULL.
+ */
+ if (unlikely(val & ~_Q_LOCKED_MASK)) {
+
+ /* Undo PENDING if we set it. */
+ if (!(val & _Q_PENDING_MASK))
+ clear_pending(lock);
+
+ goto queue;
+ }
+
+ /*
+ * We're pending, wait for the owner to go away.
+ *
+ * 0,1,1 -> *,1,0
+ *
+ * this wait loop must be a load-acquire such that we match the
+ * store-release that clears the locked bit and create lock
+ * sequentiality; this is because not all
+ * clear_pending_set_locked() implementations imply full
+ * barriers.
+ */
+ if (val & _Q_LOCKED_MASK)
+ smp_cond_load_acquire_label(&lock->locked, !VAL, release_err);
+
+ /*
+ * take ownership and clear the pending bit.
+ *
+ * 0,1,0 -> 0,0,1
+ */
+ clear_pending_set_locked(lock);
+ return 0;
+
+ /*
+ * End of pending bit optimistic spinning and beginning of MCS
+ * queuing.
+ */
+queue:
+ node0 = &(qnodes[bpf_get_smp_processor_id()])[0].mcs;
+ idx = node0->count++;
+ tail = encode_tail(bpf_get_smp_processor_id(), idx);
+
+ /*
+ * 4 nodes are allocated based on the assumption that there will not be
+ * nested NMIs taking spinlocks. That may not be true in some
+ * architectures even though the chance of needing more than 4 nodes
+ * will still be extremely unlikely. When that happens, we simply return
+ * an error. Original qspinlock has a trylock fallback in this case.
+ */
+ if (unlikely(idx >= _Q_MAX_NODES)) {
+ ret = -EBUSY;
+ goto release_node_err;
+ }
+
+ node = grab_mcs_node(node0, idx);
+
+ /*
+ * Ensure that we increment the head node->count before initialising
+ * the actual node. If the compiler is kind enough to reorder these
+ * stores, then an IRQ could overwrite our assignments.
+ */
+ barrier();
+
+ node->locked = 0;
+ node->next = NULL;
+
+ /*
+ * We touched a (possibly) cold cacheline in the per-cpu queue node;
+ * attempt the trylock once more in the hope someone let go while we
+ * weren't watching.
+ */
+ if (arena_spin_trylock(lock))
+ goto release;
+
+ /*
+ * Ensure that the initialisation of @node is complete before we
+ * publish the updated tail via xchg_tail() and potentially link
+ * @node into the waitqueue via WRITE_ONCE(prev->next, node) below.
+ */
+ smp_wmb();
+
+ /*
+ * Publish the updated tail.
+ * We have already touched the queueing cacheline; don't bother with
+ * pending stuff.
+ *
+ * p,*,* -> n,*,*
+ */
+ old = xchg_tail(lock, tail);
+ next = NULL;
+
+ /*
+ * if there was a previous node; link it and wait until reaching the
+ * head of the waitqueue.
+ */
+ if (old & _Q_TAIL_MASK) {
+ prev = decode_tail(old);
+
+ /* Link @node into the waitqueue. */
+ WRITE_ONCE(prev->next, node);
+
+ arch_mcs_spin_lock_contended_label(&node->locked, release_node_err);
+
+ /*
+ * While waiting for the MCS lock, the next pointer may have
+ * been set by another lock waiter. We cannot prefetch here
+ * due to lack of equivalent instruction in BPF ISA.
+ */
+ next = READ_ONCE(node->next);
+ }
+
+ /*
+ * we're at the head of the waitqueue, wait for the owner & pending to
+ * go away.
+ *
+ * *,x,y -> *,0,0
+ *
+ * this wait loop must use a load-acquire such that we match the
+ * store-release that clears the locked bit and create lock
+ * sequentiality; this is because the set_locked() function below
+ * does not imply a full barrier.
+ */
+ val = atomic_cond_read_acquire_label(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK),
+ release_node_err);
+
+ /*
+ * claim the lock:
+ *
+ * n,0,0 -> 0,0,1 : lock, uncontended
+ * *,*,0 -> *,*,1 : lock, contended
+ *
+ * If the queue head is the only one in the queue (lock value == tail)
+ * and nobody is pending, clear the tail code and grab the lock.
+ * Otherwise, we only need to grab the lock.
+ */
+
+ /*
+ * In the PV case we might already have _Q_LOCKED_VAL set, because
+ * of lock stealing; therefore we must also allow:
+ *
+ * n,0,1 -> 0,0,1
+ *
+ * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
+ * above wait condition, therefore any concurrent setting of
+ * PENDING will make the uncontended transition fail.
+ */
+ if ((val & _Q_TAIL_MASK) == tail) {
+ if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
+ goto release; /* No contention */
+ }
+
+ /*
+ * Either somebody is queued behind us or _Q_PENDING_VAL got set
+ * which will then detect the remaining tail and queue behind us
+ * ensuring we'll see a @next.
+ */
+ set_locked(lock);
+
+ /*
+ * contended path; wait for next if not observed yet, release.
+ */
+ if (!next)
+ next = smp_cond_load_relaxed_label(&node->next, (VAL), release_node_err);
+
+ arch_mcs_spin_unlock_contended(&next->locked);
+
+release:;
+ /*
+ * release the node
+ *
+ * Doing a normal dec vs this_cpu_dec is fine. An upper context always
+ * decrements count it incremented before returning, thus we're fine.
+ * For contexts interrupting us, they either observe our dec or not.
+ * Just ensure the compiler doesn't reorder this statement, as
+ * this_cpu_dec would have implied that ordering.
+ */
+ barrier();
+ node0->count--;
+ return 0;
+release_node_err:
+ barrier();
+ node0->count--;
+ goto release_err;
+release_err:
+ return ret;
+}
+
+/**
+ * arena_spin_lock - acquire a queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ *
+ * On error, returned value will be negative.
+ * On success, zero is returned.
+ *
+ * The return value _must_ be tested against zero for success,
+ * rather than checked against a negative value, in order to
+ * satisfy the BPF verifier.
+ *
+ * The user should do:
+ * if (arena_spin_lock(...) != 0) // failure
+ * or
+ * if (arena_spin_lock(...) == 0) // success
+ * or
+ * if (arena_spin_lock(...)) // failure
+ * or
+ * if (!arena_spin_lock(...)) // success
+ * instead of:
+ * if (arena_spin_lock(...) < 0) // failure
+ *
+ * The return value can still be inspected later.
+ */
+static __always_inline int arena_spin_lock(arena_spinlock_t __arena *lock)
+{
+ int val = 0;
+
+ if (CONFIG_NR_CPUS > 1024)
+ return -EOPNOTSUPP;
+
+ bpf_preempt_disable();
+ if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
+ return 0;
+
+ val = arena_spin_lock_slowpath(lock, val);
+ /* FIXME: bpf_assert_range(-MAX_ERRNO, 0) once we have it working for all cases. */
+ if (val)
+ bpf_preempt_enable();
+ return val;
+}
+
+/**
+ * arena_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static __always_inline void arena_spin_unlock(arena_spinlock_t __arena *lock)
+{
+ /*
+ * unlock() needs release semantics:
+ */
+ smp_store_release(&lock->locked, 0);
+ bpf_preempt_enable();
+}
+
+#define arena_spin_lock_irqsave(lock, flags) \
+ ({ \
+ int __ret; \
+ bpf_local_irq_save(&(flags)); \
+ __ret = arena_spin_lock((lock)); \
+ if (__ret) \
+ bpf_local_irq_restore(&(flags)); \
+ (__ret); \
+ })
+
+#define arena_spin_unlock_irqrestore(lock, flags) \
+ ({ \
+ arena_spin_unlock((lock)); \
+ bpf_local_irq_restore(&(flags)); \
+ })
+
+#endif
+
+#endif /* BPF_ARENA_SPIN_LOCK_H */
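The header above only defines the locking primitives; as a usage illustration, here is a minimal sketch of a BPF program taking the lock around some shared state. The arena map layout, the variable and program names, and the attach point are invented for this sketch, and it assumes the arena helpers from the selftests' bpf_arena_common.h; it is not part of the patch.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_arena_common.h"
#include "bpf_arena_spin_lock.h"

struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 1);		/* one arena page is plenty here */
} arena SEC(".maps");

/* hypothetical shared state protected by the arena lock */
arena_spinlock_t __arena lock;
int counter;

SEC("tc")
int touch_counter(struct __sk_buff *skb)
{
	unsigned long flags;

	/* A non-zero return means the lock could not be taken (e.g. all
	 * four per-CPU MCS nodes are busy); the macro has already restored
	 * the IRQ state in that case, so just bail out.
	 */
	if (arena_spin_lock_irqsave(&lock, flags))
		return 0;

	counter++;

	arena_spin_unlock_irqrestore(&lock, flags);
	return 0;
}

char _license[] SEC("license") = "GPL";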
diff --git a/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c
new file mode 100644
index 000000000000..1654a530aa3d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Highlights:
+ * 1. The major difference between this bpf program and tcp_cubic.c
+ * is that this bpf program relies on `cong_control` rather than
+ * `cong_avoid` in the struct tcp_congestion_ops.
+ * 2. Logic such as tcp_cwnd_reduction, tcp_cong_avoid, and
+ *    tcp_update_pacing_rate is bypassed when `cong_control` is
+ *    defined, so that logic is moved into `cong_control`.
+ * 3. WARNING: This bpf program is NOT the same as tcp_cubic.c.
+ * The main purpose is to show use cases of the arguments in
+ * `cong_control`. For simplicity's sake, it reuses tcp cubic's
+ * kernel functions.
+ */
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define USEC_PER_SEC 1000000UL
+#define TCP_PACING_SS_RATIO (200)
+#define TCP_PACING_CA_RATIO (120)
+#define TCP_REORDERING (12)
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#define after(seq2, seq1) before(seq1, seq2)
+
+extern void cubictcp_init(struct sock *sk) __ksym;
+extern void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) __ksym;
+extern __u32 cubictcp_recalc_ssthresh(struct sock *sk) __ksym;
+extern void cubictcp_state(struct sock *sk, __u8 new_state) __ksym;
+extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
+extern void cubictcp_acked(struct sock *sk, const struct ack_sample *sample) __ksym;
+extern void cubictcp_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
+
+static bool before(__u32 seq1, __u32 seq2)
+{
+ return (__s32)(seq1-seq2) < 0;
+}
+
+static __u64 div64_u64(__u64 dividend, __u64 divisor)
+{
+ return dividend / divisor;
+}
+
+static void tcp_update_pacing_rate(struct sock *sk)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ __u64 rate;
+
+ /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
+ rate = (__u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3);
+
+ /* current rate is (cwnd * mss) / srtt
+ * In Slow Start [1], set sk_pacing_rate to 200 % the current rate.
+ * In Congestion Avoidance phase, set it to 120 % the current rate.
+ *
+ * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh)
+ * If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
+ * end of slow start and should slow down.
+ */
+ if (tp->snd_cwnd < tp->snd_ssthresh / 2)
+ rate *= TCP_PACING_SS_RATIO;
+ else
+ rate *= TCP_PACING_CA_RATIO;
+
+ rate *= max(tp->snd_cwnd, tp->packets_out);
+
+ if (tp->srtt_us)
+ rate = div64_u64(rate, (__u64)tp->srtt_us);
+
+ sk->sk_pacing_rate = min(rate, sk->sk_max_pacing_rate);
+}
+
+static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
+ int newly_lost, int flag)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int sndcnt = 0;
+ __u32 pkts_in_flight = tp->packets_out - (tp->sacked_out + tp->lost_out) + tp->retrans_out;
+ int delta = tp->snd_ssthresh - pkts_in_flight;
+
+ if (newly_acked_sacked <= 0 || !tp->prior_cwnd)
+ return;
+
+ __u32 prr_delivered = tp->prr_delivered + newly_acked_sacked;
+
+ if (delta < 0) {
+ __u64 dividend =
+ (__u64)tp->snd_ssthresh * prr_delivered + tp->prior_cwnd - 1;
+ sndcnt = (__u32)div64_u64(dividend, (__u64)tp->prior_cwnd) - tp->prr_out;
+ } else {
+ sndcnt = max(prr_delivered - tp->prr_out, newly_acked_sacked);
+ if (flag & FLAG_SND_UNA_ADVANCED && !newly_lost)
+ sndcnt++;
+ sndcnt = min(delta, sndcnt);
+ }
+ /* Force a fast retransmit upon entering fast recovery */
+ sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1));
+ tp->snd_cwnd = pkts_in_flight + sndcnt;
+}
+
+/* Decide whether to run the increase function of congestion control. */
+static bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
+{
+ if (tcp_sk(sk)->reordering > TCP_REORDERING)
+ return flag & FLAG_FORWARD_PROGRESS;
+
+ return flag & FLAG_DATA_ACKED;
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_init, struct sock *sk)
+{
+ cubictcp_init(sk);
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
+{
+ cubictcp_cwnd_event(sk, event);
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_cong_control, struct sock *sk, __u32 ack, int flag,
+ const struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (((1<<TCP_CA_CWR) | (1<<TCP_CA_Recovery)) &
+ (1 << inet_csk(sk)->icsk_ca_state)) {
+ /* Reduce cwnd if state mandates */
+ tcp_cwnd_reduction(sk, rs->acked_sacked, rs->losses, flag);
+
+ if (!before(tp->snd_una, tp->high_seq)) {
+ /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
+ if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
+ inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
+ tp->snd_cwnd = tp->snd_ssthresh;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
+ }
+ }
+ } else if (tcp_may_raise_cwnd(sk, flag)) {
+ /* Advance cwnd if state allows */
+ cubictcp_cong_avoid(sk, ack, rs->acked_sacked);
+ tp->snd_cwnd_stamp = tcp_jiffies32;
+ }
+
+ tcp_update_pacing_rate(sk);
+}
+
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_recalc_ssthresh, struct sock *sk)
+{
+ return cubictcp_recalc_ssthresh(sk);
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_state, struct sock *sk, __u8 new_state)
+{
+ cubictcp_state(sk, new_state);
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_acked, struct sock *sk, const struct ack_sample *sample)
+{
+ cubictcp_acked(sk, sample);
+}
+
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_undo_cwnd, struct sock *sk)
+{
+ return tcp_reno_undo_cwnd(sk);
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops cc_cubic = {
+ .init = (void *)bpf_cubic_init,
+ .ssthresh = (void *)bpf_cubic_recalc_ssthresh,
+ .cong_control = (void *)bpf_cubic_cong_control,
+ .set_state = (void *)bpf_cubic_state,
+ .undo_cwnd = (void *)bpf_cubic_undo_cwnd,
+ .cwnd_event = (void *)bpf_cubic_cwnd_event,
+ .pkts_acked = (void *)bpf_cubic_acked,
+ .name = "bpf_cc_cubic",
+};
+
+char _license[] SEC("license") = "GPL";
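To make the pacing-rate computation in tcp_update_pacing_rate() above easier to follow, here is a worked example with made-up numbers (they are not taken from the patch or from any test run):

/* Example: mss_cache = 1448, snd_cwnd = 10, packets_out = 8, and an actual
 * smoothed RTT of 20 ms, i.e. tp->srtt_us = 20000 << 3 = 160000 (the kernel
 * stores srtt left-shifted by 3). While still in slow start:
 *
 *   rate  = 1448 * ((1000000 / 100) << 3)    = 1448 * 80000 = 115,840,000
 *   rate *= TCP_PACING_SS_RATIO (200)        = 23,168,000,000
 *   rate *= max(snd_cwnd, packets_out) = 10  = 231,680,000,000
 *   rate /= srtt_us = 160000                 = 1,448,000 bytes/sec
 *
 * The raw rate cwnd * mss / srtt is 10 * 1448 / 0.02 s = 724,000 bytes/sec,
 * so the socket is paced at exactly 200% of the current rate, as the
 * comment in tcp_update_pacing_rate() states.
 */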
diff --git a/tools/testing/selftests/bpf/progs/bpf_compiler.h b/tools/testing/selftests/bpf/progs/bpf_compiler.h
new file mode 100644
index 000000000000..a7c343dc82e6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_compiler.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BPF_COMPILER_H__
+#define __BPF_COMPILER_H__
+
+#define DO_PRAGMA_(X) _Pragma(#X)
+
+#if __clang__
+#define __pragma_loop_unroll DO_PRAGMA_(clang loop unroll(enable))
+#else
+/* In GCC, -funroll-loops, which is enabled with -O2, should have the
+   same impact as the loop unroll(enable) pragma above. */
+#define __pragma_loop_unroll
+#endif
+
+#if __clang__
+#define __pragma_loop_unroll_count(N) DO_PRAGMA_(clang loop unroll_count(N))
+#else
+#define __pragma_loop_unroll_count(N) DO_PRAGMA_(GCC unroll N)
+#endif
+
+#if __clang__
+#define __pragma_loop_unroll_full DO_PRAGMA_(clang loop unroll(full))
+#else
+#define __pragma_loop_unroll_full DO_PRAGMA_(GCC unroll 65534)
+#endif
+
+#if __clang__
+#define __pragma_loop_no_unroll DO_PRAGMA_(clang loop unroll(disable))
+#else
+#define __pragma_loop_no_unroll DO_PRAGMA_(GCC unroll 1)
+#endif
+
+#endif
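As a usage sketch (the function below is hypothetical and not part of the header): each macro expands to a bare compiler pragma, so it is placed directly before the loop it should affect.

#include "bpf_compiler.h"

static int sum16(const int *arr)
{
	int i, sum = 0;

	/* ask the compiler to fully unroll the next loop */
	__pragma_loop_unroll_full
	for (i = 0; i < 16; i++)
		sum += arr[i];

	/* keep this one rolled, e.g. to bound the generated program size */
	__pragma_loop_no_unroll
	for (i = 0; i < 16; i++)
		sum += arr[i];

	return sum;
}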
diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c
index 6939bfd8690f..f089faa97ae6 100644
--- a/tools/testing/selftests/bpf/progs/bpf_cubic.c
+++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* WARNING: This implemenation is not necessarily the same
+/* WARNING: This implementation is not necessarily the same
* as the tcp_cubic.c. The purpose is mainly for testing
* the kernel BPF logic.
*
@@ -14,14 +14,22 @@
* "ca->ack_cnt / delta" operation.
*/
-#include <linux/bpf.h>
-#include <linux/stddef.h>
-#include <linux/tcp.h>
-#include "bpf_tcp_helpers.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#define max(a, b) ((a) > (b) ? (a) : (b))
+static bool before(__u32 seq1, __u32 seq2)
+{
+ return (__s32)(seq1-seq2) < 0;
+}
+#define after(seq2, seq1) before(seq1, seq2)
+
+extern __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked) __ksym;
+extern void tcp_cong_avoid_ai(struct tcp_sock *tp, __u32 w, __u32 acked) __ksym;
#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation
* max_cwnd = snd_cwnd * beta
@@ -70,7 +78,7 @@ static const __u64 cube_factor = (__u64)(1ull << (10+3*BICTCP_HZ))
/ (bic_scale * 10);
/* BIC TCP Parameters */
-struct bictcp {
+struct bpf_bictcp {
__u32 cnt; /* increase cwnd by 1 after ACKs */
__u32 last_max_cwnd; /* last maximum snd_cwnd */
__u32 last_cwnd; /* the last snd_cwnd */
@@ -91,7 +99,7 @@ struct bictcp {
__u32 curr_rtt; /* the minimum rtt of current round */
};
-static inline void bictcp_reset(struct bictcp *ca)
+static void bictcp_reset(struct bpf_bictcp *ca)
{
ca->cnt = 0;
ca->last_max_cwnd = 0;
@@ -112,7 +120,7 @@ extern unsigned long CONFIG_HZ __kconfig;
#define USEC_PER_SEC 1000000UL
#define USEC_PER_JIFFY (USEC_PER_SEC / HZ)
-static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
+static __u64 div64_u64(__u64 dividend, __u64 divisor)
{
return dividend / divisor;
}
@@ -120,7 +128,7 @@ static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
#define div64_ul div64_u64
#define BITS_PER_U64 (sizeof(__u64) * 8)
-static __always_inline int fls64(__u64 x)
+static int fls64(__u64 x)
{
int num = BITS_PER_U64 - 1;
@@ -153,15 +161,15 @@ static __always_inline int fls64(__u64 x)
return num + 1;
}
-static __always_inline __u32 bictcp_clock_us(const struct sock *sk)
+static __u32 bictcp_clock_us(const struct sock *sk)
{
return tcp_sk(sk)->tcp_mstamp;
}
-static __always_inline void bictcp_hystart_reset(struct sock *sk)
+static void bictcp_hystart_reset(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
+ struct bpf_bictcp *ca = inet_csk_ca(sk);
ca->round_start = ca->last_ack = bictcp_clock_us(sk);
ca->end_seq = tp->snd_nxt;
@@ -169,15 +177,10 @@ static __always_inline void bictcp_hystart_reset(struct sock *sk)
ca->sample_cnt = 0;
}
-/* "struct_ops/" prefix is not a requirement
- * It will be recognized as BPF_PROG_TYPE_STRUCT_OPS
- * as long as it is used in one of the func ptr
- * under SEC(".struct_ops").
- */
-SEC("struct_ops/bictcp_init")
-void BPF_PROG(bictcp_init, struct sock *sk)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_init, struct sock *sk)
{
- struct bictcp *ca = inet_csk_ca(sk);
+ struct bpf_bictcp *ca = inet_csk_ca(sk);
bictcp_reset(ca);
@@ -188,14 +191,11 @@ void BPF_PROG(bictcp_init, struct sock *sk)
tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}
-/* No prefix in SEC will also work.
- * The remaining tcp-cubic functions have an easier way.
- */
-SEC("no-sec-prefix-bictcp_cwnd_event")
-void BPF_PROG(bictcp_cwnd_event, struct sock *sk, enum tcp_ca_event event)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
{
if (event == CA_EVENT_TX_START) {
- struct bictcp *ca = inet_csk_ca(sk);
+ struct bpf_bictcp *ca = inet_csk_ca(sk);
__u32 now = tcp_jiffies32;
__s32 delta;
@@ -236,7 +236,7 @@ static const __u8 v[] = {
* Newton-Raphson iteration.
* Avg err ~= 0.195%
*/
-static __always_inline __u32 cubic_root(__u64 a)
+static __u32 cubic_root(__u64 a)
{
__u32 x, b, shift;
@@ -269,8 +269,7 @@ static __always_inline __u32 cubic_root(__u64 a)
/*
* Compute congestion window to use.
*/
-static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
- __u32 acked)
+static void bictcp_update(struct bpf_bictcp *ca, __u32 cwnd, __u32 acked)
{
__u32 delta, bic_target, max_cnt;
__u64 offs, t;
@@ -315,7 +314,7 @@ static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
* (so time^3 is done by using 64 bit)
* and without the support of division of 64bit numbers
* (so all divisions are done by using 32 bit)
- * also NOTE the unit of those veriables
+ * also NOTE the unit of those variables
* time = (t - K) / 2^bictcp_HZ
* c = bic_scale >> 10
* rtt = (srtt >> 3) / HZ
@@ -383,11 +382,11 @@ tcp_friendliness:
ca->cnt = max(ca->cnt, 2U);
}
-/* Or simply use the BPF_STRUCT_OPS to avoid the SEC boiler plate. */
-void BPF_STRUCT_OPS(bictcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
+ struct bpf_bictcp *ca = inet_csk_ca(sk);
if (!tcp_is_cwnd_limited(sk))
return;
@@ -403,10 +402,11 @@ void BPF_STRUCT_OPS(bictcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
tcp_cong_avoid_ai(tp, ca->cnt, acked);
}
-__u32 BPF_STRUCT_OPS(bictcp_recalc_ssthresh, struct sock *sk)
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_recalc_ssthresh, struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
+ struct bpf_bictcp *ca = inet_csk_ca(sk);
ca->epoch_start = 0; /* end of epoch */
@@ -420,7 +420,8 @@ __u32 BPF_STRUCT_OPS(bictcp_recalc_ssthresh, struct sock *sk)
return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}
-void BPF_STRUCT_OPS(bictcp_state, struct sock *sk, __u8 new_state)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_state, struct sock *sk, __u8 new_state)
{
if (new_state == TCP_CA_Loss) {
bictcp_reset(inet_csk_ca(sk));
@@ -439,7 +440,7 @@ void BPF_STRUCT_OPS(bictcp_state, struct sock *sk, __u8 new_state)
* We apply another 100% factor because @rate is doubled at this point.
* We cap the cushion to 1ms.
*/
-static __always_inline __u32 hystart_ack_delay(struct sock *sk)
+static __u32 hystart_ack_delay(struct sock *sk)
{
unsigned long rate;
@@ -450,10 +451,10 @@ static __always_inline __u32 hystart_ack_delay(struct sock *sk)
div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
}
-static __always_inline void hystart_update(struct sock *sk, __u32 delay)
+static void hystart_update(struct sock *sk, __u32 delay)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
+ struct bpf_bictcp *ca = inet_csk_ca(sk);
__u32 threshold;
if (hystart_detect & HYSTART_ACK_TRAIN) {
@@ -496,14 +497,17 @@ static __always_inline void hystart_update(struct sock *sk, __u32 delay)
}
}
-void BPF_STRUCT_OPS(bictcp_acked, struct sock *sk,
- const struct ack_sample *sample)
+int bpf_cubic_acked_called = 0;
+
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_acked, struct sock *sk, const struct ack_sample *sample)
{
const struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
+ struct bpf_bictcp *ca = inet_csk_ca(sk);
__u32 delay;
- /* Some calls are for duplicates without timetamps */
+ bpf_cubic_acked_called = 1;
+ /* Some calls are for duplicates without timestamps */
if (sample->rtt_us < 0)
return;
@@ -525,21 +529,22 @@ void BPF_STRUCT_OPS(bictcp_acked, struct sock *sk,
hystart_update(sk, delay);
}
-__u32 BPF_STRUCT_OPS(tcp_reno_undo_cwnd, struct sock *sk)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
+extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
- return max(tp->snd_cwnd, tp->prior_cwnd);
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_undo_cwnd, struct sock *sk)
+{
+ return tcp_reno_undo_cwnd(sk);
}
SEC(".struct_ops")
struct tcp_congestion_ops cubic = {
- .init = (void *)bictcp_init,
- .ssthresh = (void *)bictcp_recalc_ssthresh,
- .cong_avoid = (void *)bictcp_cong_avoid,
- .set_state = (void *)bictcp_state,
- .undo_cwnd = (void *)tcp_reno_undo_cwnd,
- .cwnd_event = (void *)bictcp_cwnd_event,
- .pkts_acked = (void *)bictcp_acked,
+ .init = (void *)bpf_cubic_init,
+ .ssthresh = (void *)bpf_cubic_recalc_ssthresh,
+ .cong_avoid = (void *)bpf_cubic_cong_avoid,
+ .set_state = (void *)bpf_cubic_state,
+ .undo_cwnd = (void *)bpf_cubic_undo_cwnd,
+ .cwnd_event = (void *)bpf_cubic_cwnd_event,
+ .pkts_acked = (void *)bpf_cubic_acked,
.name = "bpf_cubic",
};
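For context, here is a hedged sketch of how the userspace side of such a test typically registers this struct_ops with the kernel. The skeleton header name is assumed to be the bpf_cubic.skel.h that bpftool would generate for this file, and error handling is trimmed.

#include <bpf/libbpf.h>
#include "bpf_cubic.skel.h"	/* generated skeleton; name assumed */

static int register_bpf_cubic(void)
{
	struct bpf_cubic *skel;
	struct bpf_link *link;

	skel = bpf_cubic__open_and_load();
	if (!skel)
		return -1;

	/* Registers the SEC(".struct_ops") map "cubic"; afterwards the
	 * "bpf_cubic" congestion control can be selected with
	 * setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bpf_cubic", ...).
	 */
	link = bpf_map__attach_struct_ops(skel->maps.cubic);
	if (!link) {
		bpf_cubic__destroy(skel);
		return -1;
	}

	/* ... run traffic, then bpf_link__destroy(link) and
	 * bpf_cubic__destroy(skel) to unregister ...
	 */
	return 0;
}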
diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
index 4dc1a967776a..7cd73e75f52a 100644
--- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c
+++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
@@ -6,18 +6,33 @@
* the kernel BPF logic.
*/
-#include <stddef.h>
-#include <linux/bpf.h>
-#include <linux/types.h>
-#include <linux/stddef.h>
-#include <linux/tcp.h>
+#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "bpf_tcp_helpers.h"
+
+#ifndef EBUSY
+#define EBUSY 16
+#endif
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#define min_not_zero(x, y) ({ \
+ typeof(x) __x = (x); \
+ typeof(y) __y = (y); \
+ __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
+static bool before(__u32 seq1, __u32 seq2)
+{
+ return (__s32)(seq1-seq2) < 0;
+}
char _license[] SEC("license") = "GPL";
+volatile const char fallback_cc[TCP_CA_NAME_MAX];
+const char bpf_dctcp[] = "bpf_dctcp";
+const char tcp_cdg[] = "cdg";
+char cc_res[TCP_CA_NAME_MAX];
+int tcp_cdg_res = 0;
int stg_result = 0;
+int ebusy_cnt = 0;
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
@@ -28,7 +43,7 @@ struct {
#define DCTCP_MAX_ALPHA 1024U
-struct dctcp {
+struct bpf_dctcp {
__u32 old_delivered;
__u32 old_delivered_ce;
__u32 prior_rcv_nxt;
@@ -41,8 +56,7 @@ struct dctcp {
static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */
static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA;
-static __always_inline void dctcp_reset(const struct tcp_sock *tp,
- struct dctcp *ca)
+static void dctcp_reset(const struct tcp_sock *tp, struct bpf_dctcp *ca)
{
ca->next_seq = tp->snd_nxt;
@@ -50,13 +64,40 @@ static __always_inline void dctcp_reset(const struct tcp_sock *tp,
ca->old_delivered_ce = tp->delivered_ce;
}
-SEC("struct_ops/dctcp_init")
-void BPF_PROG(dctcp_init, struct sock *sk)
+SEC("struct_ops")
+void BPF_PROG(bpf_dctcp_init, struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
- struct dctcp *ca = inet_csk_ca(sk);
+ struct bpf_dctcp *ca = inet_csk_ca(sk);
int *stg;
+ if (!(tp->ecn_flags & TCP_ECN_OK) && fallback_cc[0]) {
+ /* Switch to fallback */
+ if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+ (void *)fallback_cc, sizeof(fallback_cc)) == -EBUSY)
+ ebusy_cnt++;
+
+ /* Switch back to myself; the recursively invoked bpf_dctcp_init()
+ * will get -EBUSY for all bpf_setsockopt(TCP_CONGESTION),
+ * except the last "cdg" one.
+ */
+ if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+ (void *)bpf_dctcp, sizeof(bpf_dctcp)) == -EBUSY)
+ ebusy_cnt++;
+
+ /* Switch back to fallback */
+ if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+ (void *)fallback_cc, sizeof(fallback_cc)) == -EBUSY)
+ ebusy_cnt++;
+
+ /* Expecting -ENOTSUPP for tcp_cdg_res */
+ tcp_cdg_res = bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+ (void *)tcp_cdg, sizeof(tcp_cdg));
+ bpf_getsockopt(sk, SOL_TCP, TCP_CONGESTION,
+ (void *)cc_res, sizeof(cc_res));
+ return;
+ }
+
ca->prior_rcv_nxt = tp->rcv_nxt;
ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
ca->loss_cwnd = 0;
@@ -70,21 +111,21 @@ void BPF_PROG(dctcp_init, struct sock *sk)
dctcp_reset(tp, ca);
}
-SEC("struct_ops/dctcp_ssthresh")
-__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_dctcp_ssthresh, struct sock *sk)
{
- struct dctcp *ca = inet_csk_ca(sk);
+ struct bpf_dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
ca->loss_cwnd = tp->snd_cwnd;
return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
}
-SEC("struct_ops/dctcp_update_alpha")
-void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
+SEC("struct_ops")
+void BPF_PROG(bpf_dctcp_update_alpha, struct sock *sk, __u32 flags)
{
const struct tcp_sock *tp = tcp_sk(sk);
- struct dctcp *ca = inet_csk_ca(sk);
+ struct bpf_dctcp *ca = inet_csk_ca(sk);
/* Expired RTT */
if (!before(tp->snd_una, ca->next_seq)) {
@@ -110,27 +151,27 @@ void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
}
}
-static __always_inline void dctcp_react_to_loss(struct sock *sk)
+static void dctcp_react_to_loss(struct sock *sk)
{
- struct dctcp *ca = inet_csk_ca(sk);
+ struct bpf_dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
ca->loss_cwnd = tp->snd_cwnd;
tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
}
-SEC("struct_ops/dctcp_state")
-void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
+SEC("struct_ops")
+void BPF_PROG(bpf_dctcp_state, struct sock *sk, __u8 new_state)
{
if (new_state == TCP_CA_Recovery &&
new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state))
dctcp_react_to_loss(sk);
- /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
+ /* We handle RTO in bpf_dctcp_cwnd_event to ensure that we perform only
* one loss-adjustment per RTT.
*/
}
-static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
+static void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -145,9 +186,8 @@ static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
* S: 0 <- last pkt was non-CE
* 1 <- last pkt was CE
*/
-static __always_inline
-void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
- __u32 *prior_rcv_nxt, __u32 *ce_state)
+static void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
+ __u32 *prior_rcv_nxt, __u32 *ce_state)
{
__u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;
@@ -167,10 +207,10 @@ void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
dctcp_ece_ack_cwr(sk, new_ce_state);
}
-SEC("struct_ops/dctcp_cwnd_event")
-void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
+SEC("struct_ops")
+void BPF_PROG(bpf_dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
{
- struct dctcp *ca = inet_csk_ca(sk);
+ struct bpf_dctcp *ca = inet_csk_ca(sk);
switch (ev) {
case CA_EVENT_ECN_IS_CE:
@@ -186,49 +226,39 @@ void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
}
}
-SEC("struct_ops/dctcp_cwnd_undo")
-__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_dctcp_cwnd_undo, struct sock *sk)
{
- const struct dctcp *ca = inet_csk_ca(sk);
+ const struct bpf_dctcp *ca = inet_csk_ca(sk);
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}
-SEC("struct_ops/tcp_reno_cong_avoid")
-void BPF_PROG(tcp_reno_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
-{
- struct tcp_sock *tp = tcp_sk(sk);
+extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
- if (!tcp_is_cwnd_limited(sk))
- return;
-
- /* In "safe" area, increase. */
- if (tcp_in_slow_start(tp)) {
- acked = tcp_slow_start(tp, acked);
- if (!acked)
- return;
- }
- /* In dangerous area, increase slowly. */
- tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
+SEC("struct_ops")
+void BPF_PROG(bpf_dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
+{
+ tcp_reno_cong_avoid(sk, ack, acked);
}
SEC(".struct_ops")
struct tcp_congestion_ops dctcp_nouse = {
- .init = (void *)dctcp_init,
- .set_state = (void *)dctcp_state,
+ .init = (void *)bpf_dctcp_init,
+ .set_state = (void *)bpf_dctcp_state,
.flags = TCP_CONG_NEEDS_ECN,
.name = "bpf_dctcp_nouse",
};
SEC(".struct_ops")
struct tcp_congestion_ops dctcp = {
- .init = (void *)dctcp_init,
- .in_ack_event = (void *)dctcp_update_alpha,
- .cwnd_event = (void *)dctcp_cwnd_event,
- .ssthresh = (void *)dctcp_ssthresh,
- .cong_avoid = (void *)tcp_reno_cong_avoid,
- .undo_cwnd = (void *)dctcp_cwnd_undo,
- .set_state = (void *)dctcp_state,
+ .init = (void *)bpf_dctcp_init,
+ .in_ack_event = (void *)bpf_dctcp_update_alpha,
+ .cwnd_event = (void *)bpf_dctcp_cwnd_event,
+ .ssthresh = (void *)bpf_dctcp_ssthresh,
+ .cong_avoid = (void *)bpf_dctcp_cong_avoid,
+ .undo_cwnd = (void *)bpf_dctcp_cwnd_undo,
+ .set_state = (void *)bpf_dctcp_state,
.flags = TCP_CONG_NEEDS_ECN,
.name = "bpf_dctcp",
};
diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp_release.c b/tools/testing/selftests/bpf/progs/bpf_dctcp_release.c
new file mode 100644
index 000000000000..c91763f248b2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_dctcp_release.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+const char cubic[] = "cubic";
+
+SEC("struct_ops")
+void BPF_PROG(dctcp_nouse_release, struct sock *sk)
+{
+ bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+ (void *)cubic, sizeof(cubic));
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops dctcp_rel = {
+ .release = (void *)dctcp_nouse_release,
+ .name = "bpf_dctcp_rel",
+};
diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index 5a65f6b51377..b04e092fac94 100644
--- a/tools/testing/selftests/bpf/progs/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
@@ -19,9 +19,10 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-int _version SEC("version") = 1;
#define PROG(F) PROG_(F, _##F)
-#define PROG_(NUM, NAME) SEC("flow_dissector/"#NUM) int bpf_func##NAME
+#define PROG_(NUM, NAME) SEC("flow_dissector") int flow_dissector_##NUM
+
+#define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */
/* These are the identifiers of the BPF programs that will be used in tail
* calls. Name is limited to 16 characters, with the terminating character and
@@ -144,6 +145,19 @@ int _dissect(struct __sk_buff *skb)
{
struct bpf_flow_keys *keys = skb->flow_keys;
+ if (keys->n_proto == bpf_htons(ETH_P_IP)) {
+ /* IP traffic from FLOW_CONTINUE_SADDR falls back to
+ * standard dissector
+ */
+ struct iphdr *iph, _iph;
+
+ iph = bpf_flow_dissect_get_header(skb, sizeof(*iph), &_iph);
+ if (iph && iph->ihl == 5 &&
+ iph->saddr == bpf_htonl(FLOW_CONTINUE_SADDR)) {
+ return BPF_FLOW_DISSECTOR_CONTINUE;
+ }
+ }
+
return parse_eth_proto(skb, keys->n_proto);
}
@@ -323,7 +337,7 @@ PROG(IPV6)(struct __sk_buff *skb)
keys->ip_proto = ip6h->nexthdr;
keys->flow_label = ip6_flowlabel(ip6h);
- if (keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
+ if (keys->flow_label && keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
return export_flow_keys(keys, BPF_OK);
return parse_ipv6_proto(skb, ip6h->nexthdr);
@@ -368,6 +382,8 @@ PROG(IPV6FR)(struct __sk_buff *skb)
*/
if (!(keys->flags & BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
return export_flow_keys(keys, BPF_OK);
+ } else {
+ return export_flow_keys(keys, BPF_OK);
}
return parse_ipv6_proto(skb, fragh->nexthdr);
diff --git a/tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c b/tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c
new file mode 100644
index 000000000000..56957557e3e1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Bytedance */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define MAX_ENTRIES 1000
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, MAX_ENTRIES);
+} hash_map_bench SEC(".maps");
+
+u64 __attribute__((__aligned__(256))) percpu_time[256];
+u64 nr_loops;
+
+static int loop_update_callback(__u32 index, u32 *key)
+{
+ u64 init_val = 1;
+
+ bpf_map_update_elem(&hash_map_bench, key, &init_val, BPF_ANY);
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int benchmark(void *ctx)
+{
+ u32 cpu = bpf_get_smp_processor_id();
+ u32 key = cpu + MAX_ENTRIES;
+ u64 start_time = bpf_ktime_get_ns();
+
+ bpf_loop(nr_loops, loop_update_callback, &key, 0);
+ percpu_time[cpu & 255] = bpf_ktime_get_ns() - start_time;
+ return 0;
+}
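Because benchmark() is attached at fentry of sys_getpgid, the userspace bench only has to configure nr_loops and then issue that syscall to run one measurement on the current CPU. A rough sketch, assuming a bpf_hashmap_full_update_bench.skel.h skeleton that has already been loaded and attached:

#include <unistd.h>
#include <sys/syscall.h>
#include <bpf/libbpf.h>
#include "bpf_hashmap_full_update_bench.skel.h"	/* name assumed */

static void run_once(struct bpf_hashmap_full_update_bench *skel)
{
	skel->bss->nr_loops = 1000;	/* one update per map slot */

	/* Triggers the fentry program on this CPU; the elapsed time for
	 * nr_loops map updates then lands in percpu_time[cpu & 255].
	 */
	syscall(__NR_getpgid);
}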
diff --git a/tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c b/tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c
new file mode 100644
index 000000000000..1eb74ddca414
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+} hash_map_bench SEC(".maps");
+
+/* The number of slots to store times */
+#define NR_SLOTS 32
+#define NR_CPUS 256
+#define CPU_MASK (NR_CPUS-1)
+
+/* Configured by userspace */
+u64 nr_entries;
+u64 nr_loops;
+u32 __attribute__((__aligned__(8))) key[NR_CPUS];
+
+/* Filled by us */
+u64 __attribute__((__aligned__(256))) percpu_times_index[NR_CPUS];
+u64 __attribute__((__aligned__(256))) percpu_times[NR_CPUS][NR_SLOTS];
+
+static inline void patch_key(u32 i)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ key[0] = i + 1;
+#else
+ key[0] = __builtin_bswap32(i + 1);
+#endif
+ /* the rest of key is random and is configured by userspace */
+}
+
+static int lookup_callback(__u32 index, u32 *unused)
+{
+ patch_key(index);
+ return bpf_map_lookup_elem(&hash_map_bench, key) ? 0 : 1;
+}
+
+static int loop_lookup_callback(__u32 index, u32 *unused)
+{
+ return bpf_loop(nr_entries, lookup_callback, NULL, 0) ? 0 : 1;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int benchmark(void *ctx)
+{
+ u32 cpu = bpf_get_smp_processor_id();
+ u32 times_index;
+ u64 start_time;
+
+ times_index = percpu_times_index[cpu & CPU_MASK] % NR_SLOTS;
+ start_time = bpf_ktime_get_ns();
+ bpf_loop(nr_loops, loop_lookup_callback, NULL, 0);
+ percpu_times[cpu & CPU_MASK][times_index] = bpf_ktime_get_ns() - start_time;
+ percpu_times_index[cpu & CPU_MASK] += 1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h
deleted file mode 100644
index 6a1255465fd6..000000000000
--- a/tools/testing/selftests/bpf/progs/bpf_iter.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2020 Facebook */
-/* "undefine" structs in vmlinux.h, because we "override" them below */
-#define bpf_iter_meta bpf_iter_meta___not_used
-#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used
-#define bpf_iter__ipv6_route bpf_iter__ipv6_route___not_used
-#define bpf_iter__netlink bpf_iter__netlink___not_used
-#define bpf_iter__task bpf_iter__task___not_used
-#define bpf_iter__task_file bpf_iter__task_file___not_used
-#define bpf_iter__tcp bpf_iter__tcp___not_used
-#define tcp6_sock tcp6_sock___not_used
-#define bpf_iter__udp bpf_iter__udp___not_used
-#define udp6_sock udp6_sock___not_used
-#define bpf_iter__bpf_map_elem bpf_iter__bpf_map_elem___not_used
-#define bpf_iter__bpf_sk_storage_map bpf_iter__bpf_sk_storage_map___not_used
-#define bpf_iter__sockmap bpf_iter__sockmap___not_used
-#define btf_ptr btf_ptr___not_used
-#define BTF_F_COMPACT BTF_F_COMPACT___not_used
-#define BTF_F_NONAME BTF_F_NONAME___not_used
-#define BTF_F_PTR_RAW BTF_F_PTR_RAW___not_used
-#define BTF_F_ZERO BTF_F_ZERO___not_used
-#include "vmlinux.h"
-#undef bpf_iter_meta
-#undef bpf_iter__bpf_map
-#undef bpf_iter__ipv6_route
-#undef bpf_iter__netlink
-#undef bpf_iter__task
-#undef bpf_iter__task_file
-#undef bpf_iter__tcp
-#undef tcp6_sock
-#undef bpf_iter__udp
-#undef udp6_sock
-#undef bpf_iter__bpf_map_elem
-#undef bpf_iter__bpf_sk_storage_map
-#undef bpf_iter__sockmap
-#undef btf_ptr
-#undef BTF_F_COMPACT
-#undef BTF_F_NONAME
-#undef BTF_F_PTR_RAW
-#undef BTF_F_ZERO
-
-struct bpf_iter_meta {
- struct seq_file *seq;
- __u64 session_id;
- __u64 seq_num;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__ipv6_route {
- struct bpf_iter_meta *meta;
- struct fib6_info *rt;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__netlink {
- struct bpf_iter_meta *meta;
- struct netlink_sock *sk;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__task {
- struct bpf_iter_meta *meta;
- struct task_struct *task;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__task_file {
- struct bpf_iter_meta *meta;
- struct task_struct *task;
- __u32 fd;
- struct file *file;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__bpf_map {
- struct bpf_iter_meta *meta;
- struct bpf_map *map;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__tcp {
- struct bpf_iter_meta *meta;
- struct sock_common *sk_common;
- uid_t uid;
-} __attribute__((preserve_access_index));
-
-struct tcp6_sock {
- struct tcp_sock tcp;
- struct ipv6_pinfo inet6;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__udp {
- struct bpf_iter_meta *meta;
- struct udp_sock *udp_sk;
- uid_t uid __attribute__((aligned(8)));
- int bucket __attribute__((aligned(8)));
-} __attribute__((preserve_access_index));
-
-struct udp6_sock {
- struct udp_sock udp;
- struct ipv6_pinfo inet6;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__bpf_map_elem {
- struct bpf_iter_meta *meta;
- struct bpf_map *map;
- void *key;
- void *value;
-};
-
-struct bpf_iter__bpf_sk_storage_map {
- struct bpf_iter_meta *meta;
- struct bpf_map *map;
- struct sock *sk;
- void *value;
-};
-
-struct bpf_iter__sockmap {
- struct bpf_iter_meta *meta;
- struct bpf_map *map;
- void *key;
- struct sock *sk;
-};
-
-struct btf_ptr {
- void *ptr;
- __u32 type_id;
- __u32 flags;
-};
-
-enum {
- BTF_F_COMPACT = (1ULL << 0),
- BTF_F_NONAME = (1ULL << 1),
- BTF_F_PTR_RAW = (1ULL << 2),
- BTF_F_ZERO = (1ULL << 3),
-};
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
index 6286023fd62b..19710cc0f250 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
@@ -1,17 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
-struct key_t {
- int a;
- int b;
- int c;
-};
-
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 3);
@@ -19,13 +13,20 @@ struct {
__type(value, __u64);
} arraymap1 SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 10);
+ __type(key, __u64);
+ __type(value, __u32);
+} hashmap1 SEC(".maps");
+
__u32 key_sum = 0;
__u64 val_sum = 0;
SEC("iter/bpf_map_elem")
int dump_bpf_array_map(struct bpf_iter__bpf_map_elem *ctx)
{
- __u32 *key = ctx->key;
+ __u32 *hmap_val, *key = ctx->key;
__u64 *val = ctx->value;
if (key == (void *)0 || val == (void *)0)
@@ -35,6 +36,18 @@ int dump_bpf_array_map(struct bpf_iter__bpf_map_elem *ctx)
bpf_seq_write(ctx->meta->seq, val, sizeof(__u64));
key_sum += *key;
val_sum += *val;
+
+ /* workaround - It's necessary to do this convoluted (val, key)
+ * write into hashmap1, instead of simply doing
+ * bpf_map_update_elem(&hashmap1, val, key, BPF_ANY);
+ * because key has MEM_RDONLY flag and bpf_map_update_elem expects
+ * types without this flag
+ */
+ bpf_map_update_elem(&hashmap1, val, val, BPF_ANY);
+ hmap_val = bpf_map_lookup_elem(&hashmap1, val);
+ if (hmap_val)
+ *hmap_val = *key;
+
*val = *key;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
index 6dfce3fd68bc..f47da665f7e0 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
@@ -113,3 +112,12 @@ int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
return 0;
}
+
+SEC("iter.s/bpf_map_elem")
+int sleepable_dummy_dump(struct bpf_iter__bpf_map_elem *ctx)
+{
+ if (ctx->meta->seq_num == 0)
+ BPF_SEQ_PRINTF(ctx->meta->seq, "map dump starts\n");
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c
new file mode 100644
index 000000000000..7b69e1887705
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Red Hat, Inc. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("iter/bpf_link")
+int dump_bpf_link(struct bpf_iter__bpf_link *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct bpf_link *link = ctx->link;
+ int link_id;
+
+ if (!link)
+ return 0;
+
+ link_id = link->id;
+ bpf_seq_write(seq, &link_id, sizeof(link_id));
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
index 08651b23edba..c868ffb8080f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
@@ -23,6 +22,6 @@ int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
BPF_SEQ_PRINTF(seq, "%8u %8ld %8ld %10lu\n", map->id, map->refcnt.counter,
map->usercnt.counter,
- map->memory.user->locked_vm.counter);
+ 0LLU);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
index 85fa710fad90..9fdea8cd4c6f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
@@ -1,17 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
-struct key_t {
- int a;
- int b;
- int c;
-};
-
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 3);
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c
index feaaa2b89c57..aa529f76c7fc 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -20,7 +20,7 @@ struct {
} hashmap1 SEC(".maps");
/* will set before prog run */
-volatile const __u32 num_cpus = 0;
+volatile const __s32 num_cpus = 0;
/* will collect results during prog run */
__u32 key_sum_a = 0, key_sum_b = 0, key_sum_c = 0;
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
new file mode 100644
index 000000000000..e88dab196e0f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Google LLC. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} sk_stg_map SEC(".maps");
+
+SEC("iter/bpf_sk_storage_map")
+int delete_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
+{
+ if (ctx->sk)
+ bpf_sk_storage_delete(&sk_stg_map, ctx->sk);
+
+ return 0;
+}
+
+SEC("iter/task_file")
+int fill_socket_owner(struct bpf_iter__task_file *ctx)
+{
+ struct task_struct *task = ctx->task;
+ struct file *file = ctx->file;
+ struct socket *sock;
+ int *sock_tgid;
+
+ if (!task || !file)
+ return 0;
+
+ sock = bpf_sock_from_file(file);
+ if (!sock)
+ return 0;
+
+ sock_tgid = bpf_sk_storage_get(&sk_stg_map, sock->sk, 0, 0);
+ if (!sock_tgid)
+ return 0;
+
+ *sock_tgid = task->tgid;
+
+ return 0;
+}
+
+SEC("iter/tcp")
+int negate_socket_local_storage(struct bpf_iter__tcp *ctx)
+{
+ struct sock_common *sk_common = ctx->sk_common;
+ int *sock_tgid;
+
+ if (!sk_common)
+ return 0;
+
+ sock_tgid = bpf_sk_storage_get(&sk_stg_map, sk_common, 0, 0);
+ if (!sock_tgid)
+ return 0;
+
+ *sock_tgid = -*sock_tgid;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
index 6b70ccaba301..eb9642923e1c 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -16,19 +16,37 @@ struct {
__u32 val_sum = 0;
__u32 ipv6_sk_count = 0;
+__u32 to_add_val = 0;
SEC("iter/bpf_sk_storage_map")
-int dump_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
+int rw_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
{
struct sock *sk = ctx->sk;
__u32 *val = ctx->value;
- if (sk == (void *)0 || val == (void *)0)
+ if (sk == NULL || val == NULL)
return 0;
if (sk->sk_family == AF_INET6)
ipv6_sk_count++;
val_sum += *val;
+
+ *val += to_add_val;
+
+ return 0;
+}
+
+SEC("iter/bpf_sk_storage_map")
+int oob_write_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
+{
+ struct sock *sk = ctx->sk;
+ __u32 *val = ctx->value;
+
+ if (sk == NULL || val == NULL)
+ return 0;
+
+ *(val + 1) = 0xdeadbeef;
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c
index d58d9f1642b5..73a5cf3ba3d3 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
new file mode 100644
index 000000000000..3e725b1fce37
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Oracle and/or its affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+unsigned long last_sym_value = 0;
+
+static inline char to_lower(char c)
+{
+ if (c >= 'A' && c <= 'Z')
+ c += ('a' - 'A');
+ return c;
+}
+
+static inline char to_upper(char c)
+{
+ if (c >= 'a' && c <= 'z')
+ c -= ('a' - 'A');
+ return c;
+}
+
+/* Dump symbols with their max size; the latter is calculated by caching the
+ * value of symbol N so that, when iterating on symbol N+1, the max size of
+ * symbol N can be printed as address of N+1 - address of N.
+ */
+SEC("iter/ksym")
+int dump_ksym(struct bpf_iter__ksym *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct kallsym_iter *iter = ctx->ksym;
+ __u32 seq_num = ctx->meta->seq_num;
+ unsigned long value;
+ char type;
+
+ if (!iter)
+ return 0;
+
+ if (seq_num == 0) {
+ BPF_SEQ_PRINTF(seq, "ADDR TYPE NAME MODULE_NAME KIND MAX_SIZE\n");
+ return 0;
+ }
+ if (last_sym_value)
+ BPF_SEQ_PRINTF(seq, "0x%x\n", iter->value - last_sym_value);
+ else
+ BPF_SEQ_PRINTF(seq, "\n");
+
+ value = iter->show_value ? iter->value : 0;
+
+ last_sym_value = value;
+
+ type = iter->type;
+
+ if (iter->module_name[0]) {
+ type = iter->exported ? to_upper(type) : to_lower(type);
+ BPF_SEQ_PRINTF(seq, "0x%llx %c %s [ %s ] ",
+ value, type, iter->name, iter->module_name);
+ } else {
+ BPF_SEQ_PRINTF(seq, "0x%llx %c %s ", value, type, iter->name);
+ }
+ if (!iter->pos_mod_end || iter->pos_mod_end > iter->pos)
+ BPF_SEQ_PRINTF(seq, "MOD ");
+ else if (!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > iter->pos)
+ BPF_SEQ_PRINTF(seq, "FTRACE_MOD ");
+ else if (!iter->pos_bpf_end || iter->pos_bpf_end > iter->pos)
+ BPF_SEQ_PRINTF(seq, "BPF ");
+ else
+ BPF_SEQ_PRINTF(seq, "KPROBE ");
+ return 0;
+}
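A small worked example of the size computation described above; the addresses are made up purely for illustration.

/* If symbol N ("foo") was printed with value 0xffffffffa0001000 and the
 * iterator now visits symbol N+1 at 0xffffffffa0001240, the line for "foo"
 * is terminated with "0x240\n" (0xffffffffa0001240 - 0xffffffffa0001000),
 * i.e. the largest size "foo" can possibly have before the next symbol
 * starts.
 */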
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c b/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c
new file mode 100644
index 000000000000..2f20485e0de3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 value_sum = 0;
+
+SEC("iter/bpf_map_elem")
+int dump_bpf_map_values(struct bpf_iter__bpf_map_elem *ctx)
+{
+ __u32 value = 0;
+
+ if (ctx->value == (void *)0)
+ return 0;
+
+ bpf_probe_read_kernel(&value, sizeof(value), ctx->value);
+ value_sum += value;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
index 95989f4c99b5..00b2ceae81fb 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c
new file mode 100644
index 000000000000..774d4dbe8189
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <vmlinux.h>
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define bpf_tcp_sk(skc) ({ \
+ struct sock_common *_skc = skc; \
+ sk = NULL; \
+ tp = NULL; \
+ if (_skc) { \
+ tp = bpf_skc_to_tcp_sock(_skc); \
+ sk = (struct sock *)tp; \
+ } \
+ tp; \
+})
+
+unsigned short reuse_listen_hport = 0;
+unsigned short listen_hport = 0;
+char cubic_cc[TCP_CA_NAME_MAX] = "bpf_cubic";
+char dctcp_cc[TCP_CA_NAME_MAX] = "bpf_dctcp";
+bool random_retry = false;
+
+static bool tcp_cc_eq(const char *a, const char *b)
+{
+ int i;
+
+ for (i = 0; i < TCP_CA_NAME_MAX; i++) {
+ if (a[i] != b[i])
+ return false;
+ if (!a[i])
+ break;
+ }
+
+ return true;
+}
+
+SEC("iter/tcp")
+int change_tcp_cc(struct bpf_iter__tcp *ctx)
+{
+ char cur_cc[TCP_CA_NAME_MAX];
+ struct tcp_sock *tp;
+ struct sock *sk;
+
+ if (!bpf_tcp_sk(ctx->sk_common))
+ return 0;
+
+ if (sk->sk_family != AF_INET6 ||
+ (sk->sk_state != TCP_LISTEN &&
+ sk->sk_state != TCP_ESTABLISHED) ||
+ (sk->sk_num != reuse_listen_hport &&
+ sk->sk_num != listen_hport &&
+ bpf_ntohs(sk->sk_dport) != listen_hport))
+ return 0;
+
+ if (bpf_getsockopt(tp, SOL_TCP, TCP_CONGESTION,
+ cur_cc, sizeof(cur_cc)))
+ return 0;
+
+ if (!tcp_cc_eq(cur_cc, cubic_cc))
+ return 0;
+
+ if (random_retry && bpf_get_prandom_u32() % 4 == 1)
+ return 1;
+
+ bpf_setsockopt(tp, SOL_TCP, TCP_CONGESTION, dctcp_cc, sizeof(dctcp_cc));
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c
new file mode 100644
index 000000000000..d92631ec6161
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+#include <vmlinux.h>
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <limits.h>
+
+#define AUTOBIND_LEN 6
+char sun_path[AUTOBIND_LEN];
+
+#define NR_CASES 5
+int sndbuf_setsockopt[NR_CASES] = {-1, 0, 8192, INT_MAX / 2, INT_MAX};
+int sndbuf_getsockopt[NR_CASES] = {-1, -1, -1, -1, -1};
+int sndbuf_getsockopt_expected[NR_CASES];
+
+static inline int cmpname(struct unix_sock *unix_sk)
+{
+ int i;
+
+ for (i = 0; i < AUTOBIND_LEN; i++) {
+ if (unix_sk->addr->name->sun_path[i] != sun_path[i])
+ return -1;
+ }
+
+ return 0;
+}
+
+SEC("iter/unix")
+int change_sndbuf(struct bpf_iter__unix *ctx)
+{
+ struct unix_sock *unix_sk = ctx->unix_sk;
+ int i, err;
+
+ if (!unix_sk || !unix_sk->addr)
+ return 0;
+
+ if (unix_sk->addr->name->sun_path[0])
+ return 0;
+
+ if (cmpname(unix_sk))
+ return 0;
+
+ for (i = 0; i < NR_CASES; i++) {
+ err = bpf_setsockopt(unix_sk, SOL_SOCKET, SO_SNDBUF,
+ &sndbuf_setsockopt[i],
+ sizeof(sndbuf_setsockopt[i]));
+ if (err)
+ break;
+
+ err = bpf_getsockopt(unix_sk, SOL_SOCKET, SO_SNDBUF,
+ &sndbuf_getsockopt[i],
+ sizeof(sndbuf_getsockopt[i]));
+ if (err)
+ break;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c b/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c
index f3af0e30cead..317fe49760cc 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Cloudflare */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task.c b/tools/testing/selftests/bpf/progs/bpf_iter_task.c
deleted file mode 100644
index 4983087852a0..000000000000
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task.c
+++ /dev/null
@@ -1,25 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
-
-char _license[] SEC("license") = "GPL";
-
-SEC("iter/task")
-int dump_task(struct bpf_iter__task *ctx)
-{
- struct seq_file *seq = ctx->meta->seq;
- struct task_struct *task = ctx->task;
-
- if (task == (void *)0) {
- BPF_SEQ_PRINTF(seq, " === END ===\n");
- return 0;
- }
-
- if (ctx->meta->seq_num == 0)
- BPF_SEQ_PRINTF(seq, " tgid gid\n");
-
- BPF_SEQ_PRINTF(seq, "%8d %8d\n", task->tgid, task->pid);
- return 0;
-}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
index a1ddc36f13ec..ef2f7c8d9373 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Oracle and/or its affiliates. */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <errno.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
index b2f7c7c5f952..959a8d899eaf 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
@@ -1,21 +1,22 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
int count = 0;
int tgid = 0;
+int last_tgid = 0;
+int unique_tgid_count = 0;
SEC("iter/task_file")
int dump_task_file(struct bpf_iter__task_file *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct task_struct *task = ctx->task;
- __u32 fd = ctx->fd;
struct file *file = ctx->file;
+ __u32 fd = ctx->fd;
if (task == (void *)0 || file == (void *)0)
return 0;
@@ -28,6 +29,11 @@ int dump_task_file(struct bpf_iter__task_file *ctx)
if (tgid == task->tgid && task->tgid != task->pid)
count++;
+ if (last_tgid != task->tgid) {
+ last_tgid = task->tgid;
+ unique_tgid_count++;
+ }
+
BPF_SEQ_PRINTF(seq, "%8d %8d %8d %lx\n", task->tgid, task->pid, fd,
(long)file->f_op);
return 0;
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
index 50e59a2e142e..f5a309455490 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
@@ -35,3 +34,35 @@ int dump_task_stack(struct bpf_iter__task *ctx)
return 0;
}
+
+int num_user_stacks = 0;
+
+SEC("iter/task")
+int get_task_user_stacks(struct bpf_iter__task *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct task_struct *task = ctx->task;
+ uint64_t buf_sz = 0;
+ int64_t res;
+
+ if (task == (void *)0)
+ return 0;
+
+ res = bpf_get_task_stack(task, entries,
+ MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, BPF_F_USER_STACK);
+ if (res <= 0)
+ return 0;
+
+ /* Only one task, the current one, should succeed */
+ ++num_user_stacks;
+
+ buf_sz += res;
+
+ /* If the verifier doesn't refine bpf_get_task_stack res, and instead
+ * assumes res is entirely unknown, this program will fail to load as
+ * the verifier will believe that max buf_sz value allows reading
+ * past the end of entries in bpf_seq_write call
+ */
+ bpf_seq_write(seq, &entries, buf_sz);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c
new file mode 100644
index 000000000000..d64ba7ddaed5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* Copied from mm.h */
+#define VM_READ 0x00000001
+#define VM_WRITE 0x00000002
+#define VM_EXEC 0x00000004
+#define VM_MAYSHARE 0x00000080
+
+/* Copied from kdev_t.h */
+#define MINORBITS 20
+#define MINORMASK ((1U << MINORBITS) - 1)
+#define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS))
+#define MINOR(dev) ((unsigned int) ((dev) & MINORMASK))
+
+#define D_PATH_BUF_SIZE 1024
+char d_path_buf[D_PATH_BUF_SIZE] = {};
+__u32 pid = 0;
+__u32 one_task = 0;
+__u32 one_task_error = 0;
+
+SEC("iter/task_vma") int proc_maps(struct bpf_iter__task_vma *ctx)
+{
+ struct vm_area_struct *vma = ctx->vma;
+ struct seq_file *seq = ctx->meta->seq;
+ struct task_struct *task = ctx->task;
+ struct file *file;
+ char perm_str[] = "----";
+
+ if (task == (void *)0 || vma == (void *)0)
+ return 0;
+
+ file = vma->vm_file;
+ if (task->tgid != (pid_t)pid) {
+ if (one_task)
+ one_task_error = 1;
+ return 0;
+ }
+ perm_str[0] = (vma->vm_flags & VM_READ) ? 'r' : '-';
+ perm_str[1] = (vma->vm_flags & VM_WRITE) ? 'w' : '-';
+ perm_str[2] = (vma->vm_flags & VM_EXEC) ? 'x' : '-';
+ perm_str[3] = (vma->vm_flags & VM_MAYSHARE) ? 's' : 'p';
+ BPF_SEQ_PRINTF(seq, "%08llx-%08llx %s ", vma->vm_start, vma->vm_end, perm_str);
+
+ if (file) {
+ __u32 dev = file->f_inode->i_sb->s_dev;
+
+ bpf_d_path(&file->f_path, d_path_buf, D_PATH_BUF_SIZE);
+
+ BPF_SEQ_PRINTF(seq, "%08llx ", vma->vm_pgoff << 12);
+ BPF_SEQ_PRINTF(seq, "%02x:%02x %u", MAJOR(dev), MINOR(dev),
+ file->f_inode->i_ino);
+ BPF_SEQ_PRINTF(seq, "\t%s\n", d_path_buf);
+ } else {
+ BPF_SEQ_PRINTF(seq, "%08llx 00:00 0\n", 0ULL);
+ }
+ return 0;
+}
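For reference, a line emitted by proc_maps() mirrors a /proc/<pid>/maps entry; with hypothetical values it would look like:

	7f0000000000-7f0000021000 r-xp 00000000 08:01 123456	/usr/lib/libexample.so

where the columns are the address range, permissions, file offset (vm_pgoff << 12), device major:minor, inode, and the bpf_d_path() result.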
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c b/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
new file mode 100644
index 000000000000..966ee5a7b066
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+uint32_t tid = 0;
+int num_unknown_tid = 0;
+int num_known_tid = 0;
+void *user_ptr = 0;
+void *user_ptr_long = 0;
+uint32_t pid = 0;
+
+static char big_str1[5000];
+static char big_str2[5005];
+static char big_str3[4996];
+
+SEC("iter/task")
+int dump_task(struct bpf_iter__task *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct task_struct *task = ctx->task;
+ static char info[] = " === END ===";
+
+ if (task == (void *)0) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ if (task->pid != (pid_t)tid)
+ num_unknown_tid++;
+ else
+ num_known_tid++;
+
+ if (ctx->meta->seq_num == 0)
+ BPF_SEQ_PRINTF(seq, " tgid gid\n");
+
+ BPF_SEQ_PRINTF(seq, "%8d %8d\n", task->tgid, task->pid);
+ return 0;
+}
+
+int num_expected_failure_copy_from_user_task = 0;
+int num_expected_failure_copy_from_user_task_str = 0;
+int num_success_copy_from_user_task = 0;
+int num_success_copy_from_user_task_str = 0;
+
+SEC("iter.s/task")
+int dump_task_sleepable(struct bpf_iter__task *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct task_struct *task = ctx->task;
+ static const char info[] = " === END ===";
+ struct pt_regs *regs;
+ char task_str1[10] = "aaaaaaaaaa";
+ char task_str2[10], task_str3[10];
+ char task_str4[20] = "aaaaaaaaaaaaaaaaaaaa";
+ void *ptr;
+ uint32_t user_data = 0;
+ int ret;
+
+ if (task == (void *)0) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Read an invalid pointer and ensure we get an error */
+ ptr = NULL;
+ ret = bpf_copy_from_user_task(&user_data, sizeof(uint32_t), ptr, task, 0);
+ if (ret) {
+ ++num_expected_failure_copy_from_user_task;
+ } else {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Try to read the contents of the task's instruction pointer from the
+ * remote task's address space.
+ */
+ regs = (struct pt_regs *)bpf_task_pt_regs(task);
+ if (regs == (void *)0) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+ ptr = (void *)PT_REGS_IP(regs);
+
+ ret = bpf_copy_from_user_task(&user_data, sizeof(uint32_t), ptr, task, 0);
+ if (ret) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ ++num_success_copy_from_user_task;
+
+ /* Read an invalid pointer and ensure we get an error */
+ ptr = NULL;
+ ret = bpf_copy_from_user_task_str((char *)task_str1, sizeof(task_str1), ptr, task, 0);
+ if (ret >= 0 || task_str1[9] != 'a' || task_str1[0] != '\0') {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Read an invalid pointer and ensure we get error with pad zeros flag */
+ ptr = NULL;
+ ret = bpf_copy_from_user_task_str((char *)task_str1, sizeof(task_str1),
+ ptr, task, BPF_F_PAD_ZEROS);
+ if (ret >= 0 || task_str1[9] != '\0' || task_str1[0] != '\0') {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ ++num_expected_failure_copy_from_user_task_str;
+
+ /* Same length as the string */
+ ret = bpf_copy_from_user_task_str((char *)task_str2, 10, user_ptr, task, 0);
+ /* only need to do the task pid check once */
+ if (bpf_strncmp(task_str2, 10, "test_data\0") != 0 || ret != 10 || task->tgid != pid) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Shorter length than the string */
+ ret = bpf_copy_from_user_task_str((char *)task_str3, 2, user_ptr, task, 0);
+ if (bpf_strncmp(task_str3, 2, "t\0") != 0 || ret != 2) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Longer length than the string */
+ ret = bpf_copy_from_user_task_str((char *)task_str4, 20, user_ptr, task, 0);
+ if (bpf_strncmp(task_str4, 10, "test_data\0") != 0 || ret != 10
+ || task_str4[sizeof(task_str4) - 1] != 'a') {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Longer length than the string with pad zeros flag */
+ ret = bpf_copy_from_user_task_str((char *)task_str4, 20, user_ptr, task, BPF_F_PAD_ZEROS);
+ if (bpf_strncmp(task_str4, 10, "test_data\0") != 0 || ret != 10
+ || task_str4[sizeof(task_str4) - 1] != '\0') {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Longer length than the string past a page boundary */
+ ret = bpf_copy_from_user_task_str(big_str1, 5000, user_ptr, task, 0);
+ if (bpf_strncmp(big_str1, 10, "test_data\0") != 0 || ret != 10) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* String that crosses a page boundary */
+ ret = bpf_copy_from_user_task_str(big_str1, 5000, user_ptr_long, task, BPF_F_PAD_ZEROS);
+ if (bpf_strncmp(big_str1, 4, "baba") != 0 || ret != 5000
+ || bpf_strncmp(big_str1 + 4996, 4, "bab\0") != 0) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ for (int i = 0; i < 4999; ++i) {
+ if (i % 2 == 0) {
+ if (big_str1[i] != 'b') {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+ } else {
+ if (big_str1[i] != 'a') {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+ }
+ }
+
+ /* Longer length than the string that crosses a page boundary */
+ ret = bpf_copy_from_user_task_str(big_str2, 5005, user_ptr_long, task, BPF_F_PAD_ZEROS);
+ if (bpf_strncmp(big_str2, 4, "baba") != 0 || ret != 5000
+ || bpf_strncmp(big_str2 + 4996, 5, "bab\0\0") != 0) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Shorter length than the string that crosses a page boundary */
+ ret = bpf_copy_from_user_task_str(big_str3, 4996, user_ptr_long, task, 0);
+ if (bpf_strncmp(big_str3, 4, "baba") != 0 || ret != 4996
+ || bpf_strncmp(big_str3 + 4992, 4, "bab\0") != 0) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ ++num_success_copy_from_user_task_str;
+
+ if (ctx->meta->seq_num == 0)
+ BPF_SEQ_PRINTF(seq, " tgid gid data\n");
+
+ BPF_SEQ_PRINTF(seq, "%8d %8d %8d\n", task->tgid, task->pid, user_data);
+ return 0;
+}
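A rough userspace sketch (not part of the patch) of how the globals consumed by dump_task_sleepable() could be populated; the skeleton header and type names are assumptions, not taken from this diff:

#include <unistd.h>
#include "bpf_iter_tasks.skel.h"

static char test_data[10] = "test_data";
static char long_data[5000];

/* Fill the buffers the BPF program reads via bpf_copy_from_user_task_str() */
static void setup_dump_task_sleepable(struct bpf_iter_tasks *skel)
{
	int i;

	/* alternating 'b'/'a' pattern, NUL-terminated at index 4999 */
	for (i = 0; i < 4999; i++)
		long_data[i] = (i % 2 == 0) ? 'b' : 'a';
	long_data[4999] = '\0';

	skel->bss->pid = getpid();
	skel->bss->user_ptr = test_data;
	skel->bss->user_ptr_long = long_data;
}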
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
index 54380c5e1069..164640db3a29 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
@@ -100,10 +99,10 @@ static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp,
icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
- timer_expires = icsk->icsk_timeout;
+ timer_expires = icsk->icsk_retransmit_timer.expires;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
- timer_expires = icsk->icsk_timeout;
+ timer_expires = icsk->icsk_retransmit_timer.expires;
} else if (timer_pending(&sp->sk_timer)) {
timer_active = 2;
timer_expires = sp->sk_timer.expires;
@@ -122,7 +121,7 @@ static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp,
}
BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ",
- seq_num, src, srcp, destp, destp);
+ seq_num, src, srcp, dest, destp);
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ",
state,
tp->write_seq - tp->snd_una, rx_queue,
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
index b4fbddfa4e10..591c703f5032 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
@@ -100,10 +99,10 @@ static int dump_tcp6_sock(struct seq_file *seq, struct tcp6_sock *tp,
icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
- timer_expires = icsk->icsk_timeout;
+ timer_expires = icsk->icsk_retransmit_timer.expires;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
- timer_expires = icsk->icsk_timeout;
+ timer_expires = icsk->icsk_retransmit_timer.expires;
} else if (timer_pending(&sp->sk_timer)) {
timer_active = 2;
timer_expires = sp->sk_timer.expires;
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c
index 2a4647f20c46..6b17e7e86a48 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
index ee49493dc125..56177508798f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
@@ -9,8 +9,8 @@ __u32 map1_id = 0, map2_id = 0;
__u32 map1_accessed = 0, map2_accessed = 0;
__u64 map1_seqnum = 0, map2_seqnum1 = 0, map2_seqnum2 = 0;
-static volatile const __u32 print_len;
-static volatile const __u32 ret1;
+volatile const __u32 print_len;
+volatile const __u32 ret1;
SEC("iter/bpf_map")
int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
@@ -45,7 +45,7 @@ int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
}
/* fill seq_file buffer */
- for (i = 0; i < print_len; i++)
+ for (i = 0; i < (int)print_len; i++)
bpf_seq_write(seq, &seq_num, sizeof(seq_num));
return ret;
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c
index e3a7575e81d2..9d8b7310d2c2 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c
index 1c7304f56b1e..b150bd468824 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h
index d5e3df66ad9a..6a4c50497c5e 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
index f258583afbbd..ffbd4b116d17 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
index 65f93bb03f0f..47ff7754f4fd 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_unix.c b/tools/testing/selftests/bpf/progs/bpf_iter_unix.c
new file mode 100644
index 000000000000..fea275df9e22
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_unix.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+#include <vmlinux.h>
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+char _license[] SEC("license") = "GPL";
+
+static long sock_i_ino(const struct sock *sk)
+{
+ const struct socket *sk_socket = sk->sk_socket;
+ const struct inode *inode;
+ unsigned long ino;
+
+ if (!sk_socket)
+ return 0;
+
+ inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode;
+ bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
+ return ino;
+}
+
+SEC("iter/unix")
+int dump_unix(struct bpf_iter__unix *ctx)
+{
+ struct unix_sock *unix_sk = ctx->unix_sk;
+ struct sock *sk = (struct sock *)unix_sk;
+ struct seq_file *seq;
+ __u32 seq_num;
+
+ if (!unix_sk)
+ return 0;
+
+ seq = ctx->meta->seq;
+ seq_num = ctx->meta->seq_num;
+ if (seq_num == 0)
+ BPF_SEQ_PRINTF(seq, "Num RefCount Protocol Flags Type St Inode Path\n");
+
+ BPF_SEQ_PRINTF(seq, "%pK: %08X %08X %08X %04X %02X %8lu",
+ unix_sk,
+ sk->sk_refcnt.refs.counter,
+ 0,
+ sk->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
+ sk->sk_type,
+ sk->sk_socket ?
+ (sk->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
+ (sk->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
+ sock_i_ino(sk));
+
+ if (unix_sk->addr) {
+ if (unix_sk->addr->name->sun_path[0]) {
+ BPF_SEQ_PRINTF(seq, " %s", unix_sk->addr->name->sun_path);
+ } else {
+ /* The name of the abstract UNIX domain socket starts
+ * with '\0' and can contain '\0'. The null bytes
+ * should be escaped as done in unix_seq_show().
+ */
+ __u64 i, len;
+
+ len = unix_sk->addr->len - sizeof(short);
+
+ BPF_SEQ_PRINTF(seq, " @");
+
+ for (i = 1; i < len; i++) {
+ /* unix_validate_addr() tests this upper bound. */
+ if (i >= sizeof(struct sockaddr_un))
+ break;
+
+ BPF_SEQ_PRINTF(seq, "%c",
+ unix_sk->addr->name->sun_path[i] ?:
+ '@');
+ }
+ }
+ }
+
+ BPF_SEQ_PRINTF(seq, "\n");
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c b/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c
new file mode 100644
index 000000000000..174298e122d3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 unique_tgid_cnt = 0;
+uintptr_t address = 0;
+uintptr_t offset = 0;
+__u32 last_tgid = 0;
+__u32 pid = 0;
+__u32 page_shift = 0;
+
+SEC("iter/task_vma")
+int get_vma_offset(struct bpf_iter__task_vma *ctx)
+{
+ struct vm_area_struct *vma = ctx->vma;
+ struct seq_file *seq = ctx->meta->seq;
+ struct task_struct *task = ctx->task;
+
+ if (task == NULL || vma == NULL)
+ return 0;
+
+ if (last_tgid != task->tgid)
+ unique_tgid_cnt++;
+ last_tgid = task->tgid;
+
+ if (task->tgid != pid)
+ return 0;
+
+ if (vma->vm_start <= address && vma->vm_end > address) {
+ offset = address - vma->vm_start + (vma->vm_pgoff << page_shift);
+ BPF_SEQ_PRINTF(seq, "OK\n");
+ }
+ return 0;
+}
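As a concrete (hypothetical) example of the calculation in get_vma_offset(): with page_shift = 12 (4 KiB pages), address = 0x7f0000001234, vma->vm_start = 0x7f0000000000 and vma->vm_pgoff = 3, the reported offset is 0x1234 + (3 << 12) = 0x4234.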
diff --git a/tools/testing/selftests/bpf/progs/bpf_loop.c b/tools/testing/selftests/bpf/progs/bpf_loop.c
new file mode 100644
index 000000000000..1d194455b109
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_loop.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct callback_ctx {
+ int output;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 32);
+ __type(key, int);
+ __type(value, int);
+} map1 SEC(".maps");
+
+/* These should be set by the user program */
+u32 nested_callback_nr_loops;
+u32 stop_index = -1;
+u32 nr_loops;
+int pid;
+int callback_selector;
+
+/* Making these global variables so that the userspace program
+ * can verify the output through the skeleton
+ */
+int nr_loops_returned;
+int g_output;
+int err;
+
+static int callback(__u32 index, void *data)
+{
+ struct callback_ctx *ctx = data;
+
+ if (index >= stop_index)
+ return 1;
+
+ ctx->output += index;
+
+ return 0;
+}
+
+static int empty_callback(__u32 index, void *data)
+{
+ return 0;
+}
+
+static int nested_callback2(__u32 index, void *data)
+{
+ nr_loops_returned += bpf_loop(nested_callback_nr_loops, callback, data, 0);
+
+ return 0;
+}
+
+static int nested_callback1(__u32 index, void *data)
+{
+ bpf_loop(nested_callback_nr_loops, nested_callback2, data, 0);
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int test_prog(void *ctx)
+{
+ struct callback_ctx data = {};
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ nr_loops_returned = bpf_loop(nr_loops, callback, &data, 0);
+
+ if (nr_loops_returned < 0)
+ err = nr_loops_returned;
+ else
+ g_output = data.output;
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int prog_null_ctx(void *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ nr_loops_returned = bpf_loop(nr_loops, empty_callback, NULL, 0);
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int prog_invalid_flags(void *ctx)
+{
+ struct callback_ctx data = {};
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ err = bpf_loop(nr_loops, callback, &data, 1);
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int prog_nested_calls(void *ctx)
+{
+ struct callback_ctx data = {};
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ nr_loops_returned = 0;
+ bpf_loop(nr_loops, nested_callback1, &data, 0);
+
+ g_output = data.output;
+
+ return 0;
+}
+
+static int callback_set_f0(int i, void *ctx)
+{
+ g_output = 0xF0;
+ return 0;
+}
+
+static int callback_set_0f(int i, void *ctx)
+{
+ g_output = 0x0F;
+ return 0;
+}
+
+/*
+ * non-constant callback is a corner case for bpf_loop inline logic
+ */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int prog_non_constant_callback(void *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ int (*callback)(int i, void *ctx);
+
+ g_output = 0;
+
+ if (callback_selector == 0x0F)
+ callback = callback_set_0f;
+ else
+ callback = callback_set_f0;
+
+ bpf_loop(1, callback, NULL, 0);
+
+ return 0;
+}
+
+static int stack_check_inner_callback(void *ctx)
+{
+ return 0;
+}
+
+static int map1_lookup_elem(int key)
+{
+ int *val = bpf_map_lookup_elem(&map1, &key);
+
+ return val ? *val : -1;
+}
+
+static void map1_update_elem(int key, int val)
+{
+ bpf_map_update_elem(&map1, &key, &val, BPF_ANY);
+}
+
+static int stack_check_outer_callback(void *ctx)
+{
+ int a = map1_lookup_elem(1);
+ int b = map1_lookup_elem(2);
+ int c = map1_lookup_elem(3);
+ int d = map1_lookup_elem(4);
+ int e = map1_lookup_elem(5);
+ int f = map1_lookup_elem(6);
+
+ bpf_loop(1, stack_check_inner_callback, NULL, 0);
+
+ map1_update_elem(1, a + 1);
+ map1_update_elem(2, b + 1);
+ map1_update_elem(3, c + 1);
+ map1_update_elem(4, d + 1);
+ map1_update_elem(5, e + 1);
+ map1_update_elem(6, f + 1);
+
+ return 0;
+}
+
+/* Some of the local variables in stack_check and
+ * stack_check_outer_callback would be allocated on stack by
+ * compiler. This test should verify that stack content for these
+ * variables is preserved between calls to bpf_loop (might be an issue
+ * if loop inlining allocates stack slots incorrectly).
+ */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int stack_check(void *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ int a = map1_lookup_elem(7);
+ int b = map1_lookup_elem(8);
+ int c = map1_lookup_elem(9);
+ int d = map1_lookup_elem(10);
+ int e = map1_lookup_elem(11);
+ int f = map1_lookup_elem(12);
+
+ bpf_loop(1, stack_check_outer_callback, NULL, 0);
+
+ map1_update_elem(7, a + 1);
+ map1_update_elem(8, b + 1);
+ map1_update_elem(9, c + 1);
+ map1_update_elem(10, d + 1);
+ map1_update_elem(11, e + 1);
+ map1_update_elem(12, f + 1);
+
+ return 0;
+}
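An illustrative sketch (not part of the patch) of how a userspace test could drive the globals above through a generated skeleton; the skeleton header name and the use of usleep() to reach the sys_nanosleep fentry are assumptions:

#include <unistd.h>
#include "bpf_loop.skel.h"

static void run_simple_loop_test(void)
{
	struct bpf_loop *skel = bpf_loop__open_and_load();

	if (!skel)
		return;

	skel->bss->pid = getpid();
	skel->bss->nr_loops = 10;

	if (!bpf_loop__attach(skel)) {
		usleep(1);	/* nanosleep triggers test_prog() */
		/* callback() sums indices 0..9, so expect
		 * skel->bss->g_output == 45 and
		 * skel->bss->nr_loops_returned == 10
		 */
	}
	bpf_loop__destroy(skel);
}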
diff --git a/tools/testing/selftests/bpf/progs/bpf_loop_bench.c b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c
new file mode 100644
index 000000000000..d461746fd3c1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+u32 nr_loops;
+long hits;
+
+static int empty_callback(__u32 index, void *data)
+{
+ return 0;
+}
+
+static int outer_loop(__u32 index, void *data)
+{
+ bpf_loop(nr_loops, empty_callback, NULL, 0);
+ __sync_add_and_fetch(&hits, nr_loops);
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int benchmark(void *ctx)
+{
+ bpf_loop(1000, outer_loop, NULL, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
new file mode 100644
index 000000000000..c1cfd297aabf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BPF_MISC_H__
+#define __BPF_MISC_H__
+
+#define XSTR(s) STR(s)
+#define STR(s) #s
+
+/* Expand a macro and then stringize the expansion */
+#define QUOTE(str) #str
+#define EXPAND_QUOTE(str) QUOTE(str)
+
+/* This set of attributes controls behavior of the
+ * test_loader.c:test_loader__run_subtests().
+ *
+ * The test_loader sequentially loads each program in a skeleton.
+ * Programs could be loaded in privileged and unprivileged modes.
+ * - __success, __failure, __msg, __regex imply privileged mode;
+ * - __success_unpriv, __failure_unpriv, __msg_unpriv, __regex_unpriv
+ * imply unprivileged mode.
+ * If combination of privileged and unprivileged attributes is present
+ * both modes are used. If none are present privileged mode is implied.
+ *
+ * See test_loader.c:drop_capabilities() for exact set of capabilities
+ * that differ between privileged and unprivileged modes.
+ *
+ * For test filtering purposes the name of the program loaded in
+ * unprivileged mode is derived from the usual program name by adding
+ * `@unpriv' suffix.
+ *
+ * __msg Message expected to be found in the verifier log.
+ * Multiple __msg attributes could be specified.
+ * To match a regular expression use "{{" "}}" brackets,
+ * e.g. "foo{{[0-9]+}}" matches strings like "foo007".
+ * Extended POSIX regular expression syntax is allowed
+ * inside the brackets.
+ * __msg_unpriv Same as __msg but for unprivileged mode.
+ *
+ * __xlated Expect a line in a disassembly log after verifier applies rewrites.
+ * Multiple __xlated attributes could be specified.
+ * Regular expressions could be specified same way as in __msg.
+ * __xlated_unpriv Same as __xlated but for unprivileged mode.
+ *
+ * __jited Match a line in a disassembly of the jited BPF program.
+ * Has to be used after __arch_* macro.
+ * For example:
+ *
+ * __arch_x86_64
+ * __jited(" endbr64")
+ * __jited(" nopl (%rax,%rax)")
+ * __jited(" xorq %rax, %rax")
+ * ...
+ * __naked void some_test(void)
+ * {
+ * asm volatile (... ::: __clobber_all);
+ * }
+ *
+ * Regular expressions could be included in patterns same way
+ * as in __msg.
+ *
+ * By default assume that each pattern has to be matched on the
+ * next consecutive line of disassembly, e.g.:
+ *
+ * __jited(" endbr64") # matched on line N
+ * __jited(" nopl (%rax,%rax)") # matched on line N+1
+ *
+ * If match occurs on a wrong line an error is reported.
+ * To override this behaviour use literal "...", e.g.:
+ *
+ * __jited(" endbr64") # matched on line N
+ * __jited("...") # not matched
+ * __jited(" nopl (%rax,%rax)") # matched on any line >= N
+ *
+ * __jited_unpriv Same as __jited but for unprivileged mode.
+ *
+ *
+ * __success Expect program load success in privileged mode.
+ * __success_unpriv Expect program load success in unprivileged mode.
+ *
+ * __failure Expect program load failure in privileged mode.
+ * __failure_unpriv Expect program load failure in unprivileged mode.
+ *
+ * __retval Execute the program using BPF_PROG_TEST_RUN command,
+ * expect return value to match passed parameter:
+ * - a decimal number
+ * - a hexadecimal number, when starts from 0x
+ * - a macro which expands to one of the above
+ * - literal _INT_MIN (expands to INT_MIN)
+ * In addition, two special macros are defined below:
+ * - POINTER_VALUE
+ * - TEST_DATA_LEN
+ * __retval_unpriv Same, but load program in unprivileged mode.
+ *
+ * __description Text to be used instead of a program name for display
+ * and filtering purposes.
+ *
+ * __log_level Log level to use for the program, numeric value expected.
+ *
+ * __flag Adds one flag use for the program, the following values are valid:
+ * - BPF_F_STRICT_ALIGNMENT;
+ * - BPF_F_TEST_RND_HI32;
+ * - BPF_F_TEST_STATE_FREQ;
+ * - BPF_F_SLEEPABLE;
+ * - BPF_F_XDP_HAS_FRAGS;
+ * - A numeric value.
+ * Multiple __flag attributes could be specified, the final flags
+ * value is derived by applying binary "or" to all specified values.
+ *
+ * __auxiliary Annotated program is not a separate test, but used as auxiliary
+ * for some other test cases and should always be loaded.
+ * __auxiliary_unpriv Same, but load program in unprivileged mode.
+ *
+ * __arch_* Specify on which architecture the test case should be tested.
+ * Several __arch_* annotations could be specified at once.
+ * When test case is not run on current arch it is marked as skipped.
+ * __caps_unpriv Specify the capabilities that should be set when running the test.
+ */
+#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" XSTR(__COUNTER__) "=" msg)))
+#define __xlated(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated=" XSTR(__COUNTER__) "=" msg)))
+#define __jited(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg)))
+#define __failure __attribute__((btf_decl_tag("comment:test_expect_failure")))
+#define __success __attribute__((btf_decl_tag("comment:test_expect_success")))
+#define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc)))
+#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" XSTR(__COUNTER__) "=" msg)))
+#define __xlated_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated_unpriv=" XSTR(__COUNTER__) "=" msg)))
+#define __jited_unpriv(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg)))
+#define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv")))
+#define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv")))
+#define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl)))
+#define __flag(flag) __attribute__((btf_decl_tag("comment:test_prog_flags="#flag)))
+#define __retval(val) __attribute__((btf_decl_tag("comment:test_retval="XSTR(val))))
+#define __retval_unpriv(val) __attribute__((btf_decl_tag("comment:test_retval_unpriv="XSTR(val))))
+#define __auxiliary __attribute__((btf_decl_tag("comment:test_auxiliary")))
+#define __auxiliary_unpriv __attribute__((btf_decl_tag("comment:test_auxiliary_unpriv")))
+#define __btf_path(path) __attribute__((btf_decl_tag("comment:test_btf_path=" path)))
+#define __arch(arch) __attribute__((btf_decl_tag("comment:test_arch=" arch)))
+#define __arch_x86_64 __arch("X86_64")
+#define __arch_arm64 __arch("ARM64")
+#define __arch_riscv64 __arch("RISCV64")
+#define __caps_unpriv(caps) __attribute__((btf_decl_tag("comment:test_caps_unpriv=" EXPAND_QUOTE(caps))))
+#define __load_if_JITed() __attribute__((btf_decl_tag("comment:load_mode=jited")))
+#define __load_if_no_JITed() __attribute__((btf_decl_tag("comment:load_mode=no_jited")))
+
+/* Define common capabilities tested using __caps_unpriv */
+#define CAP_NET_ADMIN 12
+#define CAP_SYS_ADMIN 21
+#define CAP_PERFMON 38
+#define CAP_BPF 39
+
+/* Convenience macro for use with 'asm volatile' blocks */
+#define __naked __attribute__((naked))
+#define __clobber_all "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "memory"
+#define __clobber_common "r0", "r1", "r2", "r3", "r4", "r5", "memory"
+#define __imm(name) [name]"i"(name)
+#define __imm_const(name, expr) [name]"i"(expr)
+#define __imm_addr(name) [name]"i"(&name)
+#define __imm_ptr(name) [name]"r"(&name)
+#define __imm_insn(name, expr) [name]"i"(*(long *)&(expr))
+
+/* Magic constants used with __retval() */
+#define POINTER_VALUE 0xbadcafe
+#define TEST_DATA_LEN 64
+
+#ifndef __used
+#define __used __attribute__((used))
+#endif
+
+#if defined(__TARGET_ARCH_x86)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX "__x64_"
+#elif defined(__TARGET_ARCH_s390)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX "__s390x_"
+#elif defined(__TARGET_ARCH_arm64)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX "__arm64_"
+#elif defined(__TARGET_ARCH_riscv)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX "__riscv_"
+#elif defined(__TARGET_ARCH_powerpc)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX ""
+#else
+#define SYSCALL_WRAPPER 0
+#define SYS_PREFIX "__se_"
+#endif
+
+/* How many arguments are passed to function in register */
+#if defined(__TARGET_ARCH_x86) || defined(__x86_64__)
+#define FUNC_REG_ARG_CNT 6
+#elif defined(__i386__)
+#define FUNC_REG_ARG_CNT 3
+#elif defined(__TARGET_ARCH_s390) || defined(__s390x__)
+#define FUNC_REG_ARG_CNT 5
+#elif defined(__TARGET_ARCH_arm) || defined(__arm__)
+#define FUNC_REG_ARG_CNT 4
+#elif defined(__TARGET_ARCH_arm64) || defined(__aarch64__)
+#define FUNC_REG_ARG_CNT 8
+#elif defined(__TARGET_ARCH_mips) || defined(__mips__)
+#define FUNC_REG_ARG_CNT 8
+#elif defined(__TARGET_ARCH_powerpc) || defined(__powerpc__) || defined(__powerpc64__)
+#define FUNC_REG_ARG_CNT 8
+#elif defined(__TARGET_ARCH_sparc) || defined(__sparc__)
+#define FUNC_REG_ARG_CNT 6
+#elif defined(__TARGET_ARCH_riscv) || defined(__riscv__)
+#define FUNC_REG_ARG_CNT 8
+#else
+/* default to 5 for others */
+#define FUNC_REG_ARG_CNT 5
+#endif
+
+/* make it look to compiler like value is read and written */
+#define __sink(expr) asm volatile("" : "+g"(expr))
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+ defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
+#define CAN_USE_GOTOL
+#endif
+
+#if __clang_major__ >= 18
+#define CAN_USE_BPF_ST
+#endif
+
+#if __clang_major__ >= 18 && defined(ENABLE_ATOMICS_TESTS) && \
+ (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) || \
+ (defined(__TARGET_ARCH_powerpc))
+#define CAN_USE_LOAD_ACQ_STORE_REL
+#endif
+
+#if defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86)
+#define SPEC_V1
+#endif
+
+#if defined(__TARGET_ARCH_x86)
+#define SPEC_V4
+#endif
+
+#endif
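A minimal illustrative test program (not part of the patch) using the annotations defined above, in the style consumed by test_loader.c; the section name and return value are arbitrary:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("socket")
__description("example: trivial constant return")
__success __retval(42)
__naked void example_trivial_retval(void)
{
	/* return 42 so that __retval(42) matches under BPF_PROG_TEST_RUN */
	asm volatile ("r0 = 42; exit;" ::: __clobber_all);
}

char _license[] SEC("license") = "GPL";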
diff --git a/tools/testing/selftests/bpf/progs/bpf_mod_race.c b/tools/testing/selftests/bpf/progs/bpf_mod_race.c
new file mode 100644
index 000000000000..82a5c6c6ba83
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_mod_race.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+const volatile struct {
+ /* thread to activate trace programs for */
+ pid_t tgid;
+ /* return error from __init function */
+ int inject_error;
+ /* uffd monitored range start address */
+ void *fault_addr;
+} bpf_mod_race_config = { -1 };
+
+int bpf_blocking = 0;
+int res_try_get_module = -1;
+
+static __always_inline bool check_thread_id(void)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+
+ return task->tgid == bpf_mod_race_config.tgid;
+}
+
+/* The trace of execution is something like this:
+ *
+ * finit_module()
+ * load_module()
+ * prepare_coming_module()
+ * notifier_call(MODULE_STATE_COMING)
+ * btf_parse_module()
+ * btf_alloc_id() // Visible to userspace at this point
+ * list_add(btf_mod->list, &btf_modules)
+ * do_init_module()
+ * freeinit = kmalloc()
+ * ret = mod->init()
+ * bpf_prog_widen_race()
+ * bpf_copy_from_user()
+ * ...<sleep>...
+ * if (ret < 0)
+ * ...
+ * free_module()
+ * return ret
+ *
+ * At this point, module loading thread is blocked, we now load the program:
+ *
+ * bpf_check
+ * add_kfunc_call/check_pseudo_btf_id
+ * btf_try_get_module
+ * try_get_module_live == false
+ * return -ENXIO
+ *
+ * Without the fix (try_get_module_live in btf_try_get_module):
+ *
+ * bpf_check
+ * add_kfunc_call/check_pseudo_btf_id
+ * btf_try_get_module
+ * try_get_module == true
+ * <store module reference in btf_kfunc_tab or used_btf array>
+ * ...
+ * return fd
+ *
+ * Now, if we inject an error in the blocked program, our module will be freed
+ * (going straight from MODULE_STATE_COMING to MODULE_STATE_GOING).
+ * Later, when bpf program is freed, it will try to module_put already freed
+ * module. This is why try_get_module_live returns false if mod->state is not
+ * MODULE_STATE_LIVE.
+ */
+
+SEC("fmod_ret.s/bpf_fentry_test1")
+int BPF_PROG(widen_race, int a, int ret)
+{
+ char dst;
+
+ if (!check_thread_id())
+ return 0;
+ /* Indicate that we will attempt to block */
+ bpf_blocking = 1;
+ bpf_copy_from_user(&dst, 1, bpf_mod_race_config.fault_addr);
+ return bpf_mod_race_config.inject_error;
+}
+
+SEC("fexit/do_init_module")
+int BPF_PROG(fexit_init_module, struct module *mod, int ret)
+{
+ if (!check_thread_id())
+ return 0;
+ /* Indicate that we finished blocking */
+ bpf_blocking = 2;
+ return 0;
+}
+
+SEC("fexit/btf_try_get_module")
+int BPF_PROG(fexit_module_get, const struct btf *btf, struct module *mod)
+{
+ res_try_get_module = !!mod;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_common.h b/tools/testing/selftests/bpf/progs/bpf_qdisc_common.h
new file mode 100644
index 000000000000..3754f581b328
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_common.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BPF_QDISC_COMMON_H
+#define _BPF_QDISC_COMMON_H
+
+#define NET_XMIT_SUCCESS 0x00
+#define NET_XMIT_DROP 0x01 /* skb dropped */
+#define NET_XMIT_CN 0x02 /* congestion notification */
+
+#define TC_PRIO_CONTROL 7
+#define TC_PRIO_MAX 15
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+
+struct bpf_sk_buff_ptr;
+
+static struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
+{
+ return (struct qdisc_skb_cb *)skb->cb;
+}
+
+static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
+{
+ return qdisc_skb_cb(skb)->pkt_len;
+}
+
+#endif
diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__incompl_ops.c b/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__incompl_ops.c
new file mode 100644
index 000000000000..f188062ed730
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__incompl_ops.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include "bpf_experimental.h"
+#include "bpf_qdisc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops")
+int BPF_PROG(bpf_qdisc_test_enqueue, struct sk_buff *skb, struct Qdisc *sch,
+ struct bpf_sk_buff_ptr *to_free)
+{
+ bpf_qdisc_skb_drop(skb, to_free);
+ return NET_XMIT_DROP;
+}
+
+SEC("struct_ops")
+struct sk_buff *BPF_PROG(bpf_qdisc_test_dequeue, struct Qdisc *sch)
+{
+ return NULL;
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_qdisc_test_reset, struct Qdisc *sch)
+{
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_qdisc_test_destroy, struct Qdisc *sch)
+{
+}
+
+SEC(".struct_ops")
+struct Qdisc_ops test = {
+ .enqueue = (void *)bpf_qdisc_test_enqueue,
+ .dequeue = (void *)bpf_qdisc_test_dequeue,
+ .reset = (void *)bpf_qdisc_test_reset,
+ .destroy = (void *)bpf_qdisc_test_destroy,
+ .id = "bpf_qdisc_test",
+};
+
diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c b/tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
new file mode 100644
index 000000000000..1de2be3e370b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include "bpf_experimental.h"
+#include "bpf_qdisc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct skb_node {
+ struct sk_buff __kptr * skb;
+ struct bpf_list_node node;
+};
+
+private(A) struct bpf_spin_lock q_fifo_lock;
+private(A) struct bpf_list_head q_fifo __contains(skb_node, node);
+
+bool init_called;
+
+SEC("struct_ops/bpf_fifo_enqueue")
+int BPF_PROG(bpf_fifo_enqueue, struct sk_buff *skb, struct Qdisc *sch,
+ struct bpf_sk_buff_ptr *to_free)
+{
+ struct skb_node *skbn;
+ u32 pkt_len;
+
+ if (sch->q.qlen == sch->limit)
+ goto drop;
+
+ skbn = bpf_obj_new(typeof(*skbn));
+ if (!skbn)
+ goto drop;
+
+ pkt_len = qdisc_pkt_len(skb);
+
+ sch->q.qlen++;
+ skb = bpf_kptr_xchg(&skbn->skb, skb);
+ if (skb)
+ bpf_qdisc_skb_drop(skb, to_free);
+
+ bpf_spin_lock(&q_fifo_lock);
+ bpf_list_push_back(&q_fifo, &skbn->node);
+ bpf_spin_unlock(&q_fifo_lock);
+
+ sch->qstats.backlog += pkt_len;
+ return NET_XMIT_SUCCESS;
+drop:
+ bpf_qdisc_skb_drop(skb, to_free);
+ return NET_XMIT_DROP;
+}
+
+SEC("struct_ops/bpf_fifo_dequeue")
+struct sk_buff *BPF_PROG(bpf_fifo_dequeue, struct Qdisc *sch)
+{
+ struct bpf_list_node *node;
+ struct sk_buff *skb = NULL;
+ struct skb_node *skbn;
+
+ bpf_spin_lock(&q_fifo_lock);
+ node = bpf_list_pop_front(&q_fifo);
+ bpf_spin_unlock(&q_fifo_lock);
+ if (!node)
+ return NULL;
+
+ skbn = container_of(node, struct skb_node, node);
+ skb = bpf_kptr_xchg(&skbn->skb, skb);
+ bpf_obj_drop(skbn);
+ if (!skb)
+ return NULL;
+
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ bpf_qdisc_bstats_update(sch, skb);
+ sch->q.qlen--;
+
+ return skb;
+}
+
+SEC("struct_ops/bpf_fifo_init")
+int BPF_PROG(bpf_fifo_init, struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ sch->limit = 1000;
+ init_called = true;
+ return 0;
+}
+
+SEC("struct_ops/bpf_fifo_reset")
+void BPF_PROG(bpf_fifo_reset, struct Qdisc *sch)
+{
+ struct bpf_list_node *node;
+ struct skb_node *skbn;
+ int i;
+
+ bpf_for(i, 0, sch->q.qlen) {
+ struct sk_buff *skb = NULL;
+
+ bpf_spin_lock(&q_fifo_lock);
+ node = bpf_list_pop_front(&q_fifo);
+ bpf_spin_unlock(&q_fifo_lock);
+
+ if (!node)
+ break;
+
+ skbn = container_of(node, struct skb_node, node);
+ skb = bpf_kptr_xchg(&skbn->skb, skb);
+ if (skb)
+ bpf_kfree_skb(skb);
+ bpf_obj_drop(skbn);
+ }
+ sch->q.qlen = 0;
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_fifo_destroy, struct Qdisc *sch)
+{
+}
+
+SEC(".struct_ops")
+struct Qdisc_ops fifo = {
+ .enqueue = (void *)bpf_fifo_enqueue,
+ .dequeue = (void *)bpf_fifo_dequeue,
+ .init = (void *)bpf_fifo_init,
+ .reset = (void *)bpf_fifo_reset,
+ .destroy = (void *)bpf_fifo_destroy,
+ .id = "bpf_fifo",
+};
+
diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c b/tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
new file mode 100644
index 000000000000..1a3233a275c7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
@@ -0,0 +1,756 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* bpf_fq is intended for testing the bpf qdisc infrastructure and not a direct
+ * copy of sch_fq. bpf_fq implements the scheduling algorithm of sch_fq before
+ * 29f834aa326e ("net_sched: sch_fq: add 3 bands and WRR scheduling") was
+ * introduced. It gives each flow a fair chance to transmit packets in a
+ * round-robin fashion. Note that for flow pacing, bpf_fq currently only
+ * respects skb->tstamp but not skb->sk->sk_pacing_rate. In addition, if there
+ * are multiple bpf_fq instances, they will have a shared view of flows and
+ * configuration since some key data structures such as fq_prio_flows,
+ * fq_nonprio_flows, and fq_bpf_data are global.
+ *
+ * To use bpf_fq alone without running selftests, use the following commands.
+ *
+ * 1. Register bpf_fq to the kernel
+ * bpftool struct_ops register bpf_qdisc_fq.bpf.o /sys/fs/bpf
+ * 2. Add bpf_fq to an interface
+ * tc qdisc add dev <interface name> root handle <handle> bpf_fq
+ * 3. Delete bpf_fq attached to the interface
+ * tc qdisc delete dev <interface name> root
+ * 4. Unregister bpf_fq
+ * bpftool struct_ops unregister name fq
+ *
+ * The qdisc name, bpf_fq, used in tc commands is defined by Qdisc_ops.id.
+ * The struct_ops_map_name, fq, used in the bpftool command is the name of the
+ * Qdisc_ops.
+ *
+ * SEC(".struct_ops")
+ * struct Qdisc_ops fq = {
+ * ...
+ * .id = "bpf_fq",
+ * };
+ */
+
+#include <vmlinux.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_experimental.h"
+#include "bpf_qdisc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define NSEC_PER_USEC 1000L
+#define NSEC_PER_SEC 1000000000L
+
+#define NUM_QUEUE (1 << 20)
+
+struct fq_bpf_data {
+ u32 quantum;
+ u32 initial_quantum;
+ u32 flow_refill_delay;
+ u32 flow_plimit;
+ u64 horizon;
+ u32 orphan_mask;
+ u32 timer_slack;
+ u64 time_next_delayed_flow;
+ u64 unthrottle_latency_ns;
+ u8 horizon_drop;
+ u32 new_flow_cnt;
+ u32 old_flow_cnt;
+ u64 ktime_cache;
+};
+
+enum {
+ CLS_RET_PRIO = 0,
+ CLS_RET_NONPRIO = 1,
+ CLS_RET_ERR = 2,
+};
+
+struct skb_node {
+ u64 tstamp;
+ struct sk_buff __kptr * skb;
+ struct bpf_rb_node node;
+};
+
+struct fq_flow_node {
+ int credit;
+ u32 qlen;
+ u64 age;
+ u64 time_next_packet;
+ struct bpf_list_node list_node;
+ struct bpf_rb_node rb_node;
+ struct bpf_rb_root queue __contains(skb_node, node);
+ struct bpf_spin_lock lock;
+ struct bpf_refcount refcount;
+};
+
+struct dequeue_nonprio_ctx {
+ bool stop_iter;
+ u64 expire;
+ u64 now;
+};
+
+struct remove_flows_ctx {
+ bool gc_only;
+ u32 reset_cnt;
+ u32 reset_max;
+};
+
+struct unset_throttled_flows_ctx {
+ bool unset_all;
+ u64 now;
+};
+
+struct fq_stashed_flow {
+ struct fq_flow_node __kptr * flow;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u64);
+ __type(value, struct fq_stashed_flow);
+ __uint(max_entries, NUM_QUEUE);
+} fq_nonprio_flows SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u64);
+ __type(value, struct fq_stashed_flow);
+ __uint(max_entries, 1);
+} fq_prio_flows SEC(".maps");
+
+private(A) struct bpf_spin_lock fq_delayed_lock;
+private(A) struct bpf_rb_root fq_delayed __contains(fq_flow_node, rb_node);
+
+private(B) struct bpf_spin_lock fq_new_flows_lock;
+private(B) struct bpf_list_head fq_new_flows __contains(fq_flow_node, list_node);
+
+private(C) struct bpf_spin_lock fq_old_flows_lock;
+private(C) struct bpf_list_head fq_old_flows __contains(fq_flow_node, list_node);
+
+private(D) struct fq_bpf_data q;
+
+/* Wrapper for bpf_kptr_xchg that expects NULL dst */
+static void bpf_kptr_xchg_back(void *map_val, void *ptr)
+{
+ void *ret;
+
+ ret = bpf_kptr_xchg(map_val, ptr);
+ if (ret)
+ bpf_obj_drop(ret);
+}
+
+static bool skbn_tstamp_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct skb_node *skbn_a;
+ struct skb_node *skbn_b;
+
+ skbn_a = container_of(a, struct skb_node, node);
+ skbn_b = container_of(b, struct skb_node, node);
+
+ return skbn_a->tstamp < skbn_b->tstamp;
+}
+
+static bool fn_time_next_packet_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct fq_flow_node *flow_a;
+ struct fq_flow_node *flow_b;
+
+ flow_a = container_of(a, struct fq_flow_node, rb_node);
+ flow_b = container_of(b, struct fq_flow_node, rb_node);
+
+ return flow_a->time_next_packet < flow_b->time_next_packet;
+}
+
+static void
+fq_flows_add_head(struct bpf_list_head *head, struct bpf_spin_lock *lock,
+ struct fq_flow_node *flow, u32 *flow_cnt)
+{
+ bpf_spin_lock(lock);
+ bpf_list_push_front(head, &flow->list_node);
+ bpf_spin_unlock(lock);
+ *flow_cnt += 1;
+}
+
+static void
+fq_flows_add_tail(struct bpf_list_head *head, struct bpf_spin_lock *lock,
+ struct fq_flow_node *flow, u32 *flow_cnt)
+{
+ bpf_spin_lock(lock);
+ bpf_list_push_back(head, &flow->list_node);
+ bpf_spin_unlock(lock);
+ *flow_cnt += 1;
+}
+
+static void
+fq_flows_remove_front(struct bpf_list_head *head, struct bpf_spin_lock *lock,
+ struct bpf_list_node **node, u32 *flow_cnt)
+{
+ bpf_spin_lock(lock);
+ *node = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ *flow_cnt -= 1;
+}
+
+static bool
+fq_flows_is_empty(struct bpf_list_head *head, struct bpf_spin_lock *lock)
+{
+ struct bpf_list_node *node;
+
+ bpf_spin_lock(lock);
+ node = bpf_list_pop_front(head);
+ if (node) {
+ bpf_list_push_front(head, node);
+ bpf_spin_unlock(lock);
+ return false;
+ }
+ bpf_spin_unlock(lock);
+
+ return true;
+}
+
+/* flow->age is used to denote the state of the flow (not-detached, detached, throttled)
+ * as well as the timestamp when the flow is detached.
+ *
+ * 0: not-detached
+ * 1 - (~0ULL-1): detached
+ * ~0ULL: throttled
+ */
+static void fq_flow_set_detached(struct fq_flow_node *flow)
+{
+ flow->age = bpf_jiffies64();
+}
+
+static bool fq_flow_is_detached(struct fq_flow_node *flow)
+{
+ return flow->age != 0 && flow->age != ~0ULL;
+}
+
+static bool sk_listener(struct sock *sk)
+{
+ return (1 << sk->__sk_common.skc_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
+}
+
+static void fq_gc(void);
+
+static int fq_new_flow(void *flow_map, struct fq_stashed_flow **sflow, u64 hash)
+{
+ struct fq_stashed_flow tmp = {};
+ struct fq_flow_node *flow;
+ int ret;
+
+ flow = bpf_obj_new(typeof(*flow));
+ if (!flow)
+ return -ENOMEM;
+
+ flow->credit = q.initial_quantum,
+ flow->qlen = 0,
+ flow->age = 1,
+ flow->time_next_packet = 0,
+
+ ret = bpf_map_update_elem(flow_map, &hash, &tmp, 0);
+ if (ret == -ENOMEM || ret == -E2BIG) {
+ fq_gc();
+ bpf_map_update_elem(&fq_nonprio_flows, &hash, &tmp, 0);
+ }
+
+ *sflow = bpf_map_lookup_elem(flow_map, &hash);
+ if (!*sflow) {
+ bpf_obj_drop(flow);
+ return -ENOMEM;
+ }
+
+ bpf_kptr_xchg_back(&(*sflow)->flow, flow);
+ return 0;
+}
+
+static int
+fq_classify(struct sk_buff *skb, struct fq_stashed_flow **sflow)
+{
+ struct sock *sk = skb->sk;
+ int ret = CLS_RET_NONPRIO;
+ u64 hash = 0;
+
+ if ((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL) {
+ *sflow = bpf_map_lookup_elem(&fq_prio_flows, &hash);
+ ret = CLS_RET_PRIO;
+ } else {
+ if (!sk || sk_listener(sk)) {
+ hash = bpf_skb_get_hash(skb) & q.orphan_mask;
+ /* Avoid collision with an existing flow hash, which
+ * only uses the lower 32 bits of hash, by setting the
+ * upper half of hash to 1.
+ */
+ hash |= (1ULL << 32);
+ } else if (sk->__sk_common.skc_state == TCP_CLOSE) {
+ hash = bpf_skb_get_hash(skb) & q.orphan_mask;
+ hash |= (1ULL << 32);
+ } else {
+ hash = sk->__sk_common.skc_hash;
+ }
+ *sflow = bpf_map_lookup_elem(&fq_nonprio_flows, &hash);
+ }
+
+ if (!*sflow)
+ ret = fq_new_flow(&fq_nonprio_flows, sflow, hash) < 0 ?
+ CLS_RET_ERR : CLS_RET_NONPRIO;
+
+ return ret;
+}
+
+static bool fq_packet_beyond_horizon(struct sk_buff *skb)
+{
+ return (s64)skb->tstamp > (s64)(q.ktime_cache + q.horizon);
+}
+
+SEC("struct_ops/bpf_fq_enqueue")
+int BPF_PROG(bpf_fq_enqueue, struct sk_buff *skb, struct Qdisc *sch,
+ struct bpf_sk_buff_ptr *to_free)
+{
+ struct fq_flow_node *flow = NULL, *flow_copy;
+ struct fq_stashed_flow *sflow;
+ u64 time_to_send, jiffies;
+ struct skb_node *skbn;
+ int ret;
+
+ if (sch->q.qlen >= sch->limit)
+ goto drop;
+
+ if (!skb->tstamp) {
+ time_to_send = q.ktime_cache = bpf_ktime_get_ns();
+ } else {
+ if (fq_packet_beyond_horizon(skb)) {
+ q.ktime_cache = bpf_ktime_get_ns();
+ if (fq_packet_beyond_horizon(skb)) {
+ if (q.horizon_drop)
+ goto drop;
+
+ skb->tstamp = q.ktime_cache + q.horizon;
+ }
+ }
+ time_to_send = skb->tstamp;
+ }
+
+ ret = fq_classify(skb, &sflow);
+ if (ret == CLS_RET_ERR)
+ goto drop;
+
+ flow = bpf_kptr_xchg(&sflow->flow, flow);
+ if (!flow)
+ goto drop;
+
+ if (ret == CLS_RET_NONPRIO) {
+ if (flow->qlen >= q.flow_plimit) {
+ bpf_kptr_xchg_back(&sflow->flow, flow);
+ goto drop;
+ }
+
+ if (fq_flow_is_detached(flow)) {
+ flow_copy = bpf_refcount_acquire(flow);
+
+ jiffies = bpf_jiffies64();
+ if ((s64)(jiffies - (flow_copy->age + q.flow_refill_delay)) > 0) {
+ if (flow_copy->credit < q.quantum)
+ flow_copy->credit = q.quantum;
+ }
+ flow_copy->age = 0;
+ fq_flows_add_tail(&fq_new_flows, &fq_new_flows_lock, flow_copy,
+ &q.new_flow_cnt);
+ }
+ }
+
+ skbn = bpf_obj_new(typeof(*skbn));
+ if (!skbn) {
+ bpf_kptr_xchg_back(&sflow->flow, flow);
+ goto drop;
+ }
+
+ skbn->tstamp = skb->tstamp = time_to_send;
+
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+
+ skb = bpf_kptr_xchg(&skbn->skb, skb);
+ if (skb)
+ bpf_qdisc_skb_drop(skb, to_free);
+
+ bpf_spin_lock(&flow->lock);
+ bpf_rbtree_add(&flow->queue, &skbn->node, skbn_tstamp_less);
+ bpf_spin_unlock(&flow->lock);
+
+ flow->qlen++;
+ bpf_kptr_xchg_back(&sflow->flow, flow);
+
+ sch->q.qlen++;
+ return NET_XMIT_SUCCESS;
+
+drop:
+ bpf_qdisc_skb_drop(skb, to_free);
+ sch->qstats.drops++;
+ return NET_XMIT_DROP;
+}
+
+static int fq_unset_throttled_flows(u32 index, struct unset_throttled_flows_ctx *ctx)
+{
+ struct bpf_rb_node *node = NULL;
+ struct fq_flow_node *flow;
+
+ bpf_spin_lock(&fq_delayed_lock);
+
+ node = bpf_rbtree_first(&fq_delayed);
+ if (!node) {
+ bpf_spin_unlock(&fq_delayed_lock);
+ return 1;
+ }
+
+ flow = container_of(node, struct fq_flow_node, rb_node);
+ if (!ctx->unset_all && flow->time_next_packet > ctx->now) {
+ q.time_next_delayed_flow = flow->time_next_packet;
+ bpf_spin_unlock(&fq_delayed_lock);
+ return 1;
+ }
+
+ node = bpf_rbtree_remove(&fq_delayed, &flow->rb_node);
+
+ bpf_spin_unlock(&fq_delayed_lock);
+
+ if (!node)
+ return 1;
+
+ flow = container_of(node, struct fq_flow_node, rb_node);
+ flow->age = 0;
+ fq_flows_add_tail(&fq_old_flows, &fq_old_flows_lock, flow, &q.old_flow_cnt);
+
+ return 0;
+}
+
+static void fq_flow_set_throttled(struct fq_flow_node *flow)
+{
+ flow->age = ~0ULL;
+
+ if (q.time_next_delayed_flow > flow->time_next_packet)
+ q.time_next_delayed_flow = flow->time_next_packet;
+
+ bpf_spin_lock(&fq_delayed_lock);
+ bpf_rbtree_add(&fq_delayed, &flow->rb_node, fn_time_next_packet_less);
+ bpf_spin_unlock(&fq_delayed_lock);
+}
+
+static void fq_check_throttled(u64 now)
+{
+ struct unset_throttled_flows_ctx ctx = {
+ .unset_all = false,
+ .now = now,
+ };
+ unsigned long sample;
+
+ if (q.time_next_delayed_flow > now)
+ return;
+
+ sample = (unsigned long)(now - q.time_next_delayed_flow);
+ q.unthrottle_latency_ns -= q.unthrottle_latency_ns >> 3;
+ q.unthrottle_latency_ns += sample >> 3;
+
+ q.time_next_delayed_flow = ~0ULL;
+ bpf_loop(NUM_QUEUE, fq_unset_throttled_flows, &ctx, 0);
+}
+
+static struct sk_buff*
+fq_dequeue_nonprio_flows(u32 index, struct dequeue_nonprio_ctx *ctx)
+{
+ u64 time_next_packet, time_to_send;
+ struct bpf_rb_node *rb_node;
+ struct sk_buff *skb = NULL;
+ struct bpf_list_head *head;
+ struct bpf_list_node *node;
+ struct bpf_spin_lock *lock;
+ struct fq_flow_node *flow;
+ struct skb_node *skbn;
+ bool is_empty;
+ u32 *cnt;
+
+ if (q.new_flow_cnt) {
+ head = &fq_new_flows;
+ lock = &fq_new_flows_lock;
+ cnt = &q.new_flow_cnt;
+ } else if (q.old_flow_cnt) {
+ head = &fq_old_flows;
+ lock = &fq_old_flows_lock;
+ cnt = &q.old_flow_cnt;
+ } else {
+ if (q.time_next_delayed_flow != ~0ULL)
+ ctx->expire = q.time_next_delayed_flow;
+ goto break_loop;
+ }
+
+ fq_flows_remove_front(head, lock, &node, cnt);
+ if (!node)
+ goto break_loop;
+
+ flow = container_of(node, struct fq_flow_node, list_node);
+ if (flow->credit <= 0) {
+ flow->credit += q.quantum;
+ fq_flows_add_tail(&fq_old_flows, &fq_old_flows_lock, flow, &q.old_flow_cnt);
+ return NULL;
+ }
+
+ bpf_spin_lock(&flow->lock);
+ rb_node = bpf_rbtree_first(&flow->queue);
+ if (!rb_node) {
+ bpf_spin_unlock(&flow->lock);
+ is_empty = fq_flows_is_empty(&fq_old_flows, &fq_old_flows_lock);
+ if (head == &fq_new_flows && !is_empty) {
+ fq_flows_add_tail(&fq_old_flows, &fq_old_flows_lock, flow, &q.old_flow_cnt);
+ } else {
+ fq_flow_set_detached(flow);
+ bpf_obj_drop(flow);
+ }
+ return NULL;
+ }
+
+ skbn = container_of(rb_node, struct skb_node, node);
+ time_to_send = skbn->tstamp;
+
+ time_next_packet = (time_to_send > flow->time_next_packet) ?
+ time_to_send : flow->time_next_packet;
+ if (ctx->now < time_next_packet) {
+ bpf_spin_unlock(&flow->lock);
+ flow->time_next_packet = time_next_packet;
+ fq_flow_set_throttled(flow);
+ return NULL;
+ }
+
+ rb_node = bpf_rbtree_remove(&flow->queue, rb_node);
+ bpf_spin_unlock(&flow->lock);
+
+ if (!rb_node)
+ goto add_flow_and_break;
+
+ skbn = container_of(rb_node, struct skb_node, node);
+ skb = bpf_kptr_xchg(&skbn->skb, skb);
+ bpf_obj_drop(skbn);
+
+ if (!skb)
+ goto add_flow_and_break;
+
+ flow->credit -= qdisc_skb_cb(skb)->pkt_len;
+ flow->qlen--;
+
+add_flow_and_break:
+ fq_flows_add_head(head, lock, flow, cnt);
+
+break_loop:
+ ctx->stop_iter = true;
+ return skb;
+}
+
+static struct sk_buff *fq_dequeue_prio(void)
+{
+ struct fq_flow_node *flow = NULL;
+ struct fq_stashed_flow *sflow;
+ struct bpf_rb_node *rb_node;
+ struct sk_buff *skb = NULL;
+ struct skb_node *skbn;
+ u64 hash = 0;
+
+ sflow = bpf_map_lookup_elem(&fq_prio_flows, &hash);
+ if (!sflow)
+ return NULL;
+
+ flow = bpf_kptr_xchg(&sflow->flow, flow);
+ if (!flow)
+ return NULL;
+
+ bpf_spin_lock(&flow->lock);
+ rb_node = bpf_rbtree_first(&flow->queue);
+ if (!rb_node) {
+ bpf_spin_unlock(&flow->lock);
+ goto out;
+ }
+
+ skbn = container_of(rb_node, struct skb_node, node);
+ rb_node = bpf_rbtree_remove(&flow->queue, &skbn->node);
+ bpf_spin_unlock(&flow->lock);
+
+ if (!rb_node)
+ goto out;
+
+ skbn = container_of(rb_node, struct skb_node, node);
+ skb = bpf_kptr_xchg(&skbn->skb, skb);
+ bpf_obj_drop(skbn);
+
+out:
+ bpf_kptr_xchg_back(&sflow->flow, flow);
+
+ return skb;
+}
+
+SEC("struct_ops/bpf_fq_dequeue")
+struct sk_buff *BPF_PROG(bpf_fq_dequeue, struct Qdisc *sch)
+{
+ struct dequeue_nonprio_ctx cb_ctx = {};
+ struct sk_buff *skb = NULL;
+ int i;
+
+ if (!sch->q.qlen)
+ goto out;
+
+ skb = fq_dequeue_prio();
+ if (skb)
+ goto dequeue;
+
+ q.ktime_cache = cb_ctx.now = bpf_ktime_get_ns();
+ fq_check_throttled(q.ktime_cache);
+ bpf_for(i, 0, sch->limit) {
+ skb = fq_dequeue_nonprio_flows(i, &cb_ctx);
+ if (cb_ctx.stop_iter)
+ break;
+ };
+
+ if (skb) {
+dequeue:
+ sch->q.qlen--;
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ bpf_qdisc_bstats_update(sch, skb);
+ return skb;
+ }
+
+ if (cb_ctx.expire)
+ bpf_qdisc_watchdog_schedule(sch, cb_ctx.expire, q.timer_slack);
+out:
+ return NULL;
+}
+
+static int fq_remove_flows_in_list(u32 index, void *ctx)
+{
+ struct bpf_list_node *node;
+ struct fq_flow_node *flow;
+
+ bpf_spin_lock(&fq_new_flows_lock);
+ node = bpf_list_pop_front(&fq_new_flows);
+ bpf_spin_unlock(&fq_new_flows_lock);
+ if (!node) {
+ bpf_spin_lock(&fq_old_flows_lock);
+ node = bpf_list_pop_front(&fq_old_flows);
+ bpf_spin_unlock(&fq_old_flows_lock);
+ if (!node)
+ return 1;
+ }
+
+ flow = container_of(node, struct fq_flow_node, list_node);
+ bpf_obj_drop(flow);
+
+ return 0;
+}
+
+extern unsigned CONFIG_HZ __kconfig;
+
+/* limit number of collected flows per round */
+#define FQ_GC_MAX 8
+#define FQ_GC_AGE (3*CONFIG_HZ)
+
+static bool fq_gc_candidate(struct fq_flow_node *flow)
+{
+ u64 jiffies = bpf_jiffies64();
+
+ return fq_flow_is_detached(flow) &&
+ ((s64)(jiffies - (flow->age + FQ_GC_AGE)) > 0);
+}
+
+static int
+fq_remove_flows(struct bpf_map *flow_map, u64 *hash,
+ struct fq_stashed_flow *sflow, struct remove_flows_ctx *ctx)
+{
+ if (sflow->flow &&
+ (!ctx->gc_only || fq_gc_candidate(sflow->flow))) {
+ bpf_map_delete_elem(flow_map, hash);
+ ctx->reset_cnt++;
+ }
+
+ return ctx->reset_cnt < ctx->reset_max ? 0 : 1;
+}
+
+static void fq_gc(void)
+{
+ struct remove_flows_ctx cb_ctx = {
+ .gc_only = true,
+ .reset_cnt = 0,
+ .reset_max = FQ_GC_MAX,
+ };
+
+ bpf_for_each_map_elem(&fq_nonprio_flows, fq_remove_flows, &cb_ctx, 0);
+}
+
+SEC("struct_ops/bpf_fq_reset")
+void BPF_PROG(bpf_fq_reset, struct Qdisc *sch)
+{
+ struct unset_throttled_flows_ctx utf_ctx = {
+ .unset_all = true,
+ };
+ struct remove_flows_ctx rf_ctx = {
+ .gc_only = false,
+ .reset_cnt = 0,
+ .reset_max = NUM_QUEUE,
+ };
+ struct fq_stashed_flow *sflow;
+ u64 hash = 0;
+
+ sch->q.qlen = 0;
+ sch->qstats.backlog = 0;
+
+ bpf_for_each_map_elem(&fq_nonprio_flows, fq_remove_flows, &rf_ctx, 0);
+
+ rf_ctx.reset_cnt = 0;
+ bpf_for_each_map_elem(&fq_prio_flows, fq_remove_flows, &rf_ctx, 0);
+ fq_new_flow(&fq_prio_flows, &sflow, hash);
+
+ bpf_loop(NUM_QUEUE, fq_remove_flows_in_list, NULL, 0);
+ q.new_flow_cnt = 0;
+ q.old_flow_cnt = 0;
+
+ bpf_loop(NUM_QUEUE, fq_unset_throttled_flows, &utf_ctx, 0);
+}
+
+SEC("struct_ops/bpf_fq_init")
+int BPF_PROG(bpf_fq_init, struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev = sch->dev_queue->dev;
+ u32 psched_mtu = dev->mtu + dev->hard_header_len;
+ struct fq_stashed_flow *sflow;
+ u64 hash = 0;
+
+ if (fq_new_flow(&fq_prio_flows, &sflow, hash) < 0)
+ return -ENOMEM;
+
+ sch->limit = 10000;
+ q.initial_quantum = 10 * psched_mtu;
+ q.quantum = 2 * psched_mtu;
+ q.flow_refill_delay = 40;
+ q.flow_plimit = 100;
+ q.horizon = 10ULL * NSEC_PER_SEC;
+ q.horizon_drop = 1;
+ q.orphan_mask = 1024 - 1;
+ q.timer_slack = 10 * NSEC_PER_USEC;
+ q.time_next_delayed_flow = ~0ULL;
+ q.unthrottle_latency_ns = 0ULL;
+ q.new_flow_cnt = 0;
+ q.old_flow_cnt = 0;
+
+ return 0;
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_fq_destroy, struct Qdisc *sch)
+{
+}
+
+SEC(".struct_ops")
+struct Qdisc_ops fq = {
+ .enqueue = (void *)bpf_fq_enqueue,
+ .dequeue = (void *)bpf_fq_dequeue,
+ .reset = (void *)bpf_fq_reset,
+ .init = (void *)bpf_fq_init,
+ .destroy = (void *)bpf_fq_destroy,
+ .id = "bpf_fq",
+};
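The struct_ops map above is what registers the scheduler: once the map is attached, the kernel knows a Qdisc_ops with id "bpf_fq" and the qdisc can be instantiated by that kind. A minimal userspace loader sketch follows; the skeleton header name and the tc invocation are assumptions made for this illustration, not part of the patch.

/* Loader sketch (assumed skeleton name "bpf_qdisc_fq.skel.h"). While the
 * struct_ops link stays alive, the qdisc can be added by its kind, e.g.
 * `tc qdisc add dev lo handle 8000: root bpf_fq` (exact tc setup may differ).
 */
#include <stdio.h>
#include <bpf/libbpf.h>
#include "bpf_qdisc_fq.skel.h"

int main(void)
{
	struct bpf_qdisc_fq *skel;
	struct bpf_link *link;

	skel = bpf_qdisc_fq__open_and_load();
	if (!skel)
		return 1;

	link = bpf_map__attach_struct_ops(skel->maps.fq);	/* registers "bpf_fq" */
	if (!link) {
		fprintf(stderr, "failed to register bpf_fq\n");
		bpf_qdisc_fq__destroy(skel);
		return 1;
	}

	/* ... install the qdisc on a device and run traffic ... */

	bpf_link__destroy(link);
	bpf_qdisc_fq__destroy(skel);
	return 0;
}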
diff --git a/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c
new file mode 100644
index 000000000000..9e7d9674ce2a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2022 Sony Group Corporation */
+#include <vmlinux.h>
+
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+int arg1 = 0;
+unsigned long arg2 = 0;
+unsigned long arg3 = 0;
+unsigned long arg4_cx = 0;
+unsigned long arg4 = 0;
+unsigned long arg5 = 0;
+
+int arg1_core = 0;
+unsigned long arg2_core = 0;
+unsigned long arg3_core = 0;
+unsigned long arg4_core_cx = 0;
+unsigned long arg4_core = 0;
+unsigned long arg5_core = 0;
+
+int option_syscall = 0;
+unsigned long arg2_syscall = 0;
+unsigned long arg3_syscall = 0;
+unsigned long arg4_syscall = 0;
+unsigned long arg5_syscall = 0;
+
+const volatile pid_t filter_pid = 0;
+
+SEC("kprobe/" SYS_PREFIX "sys_prctl")
+int BPF_KPROBE(handle_sys_prctl)
+{
+ struct pt_regs *real_regs;
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+ unsigned long tmp = 0;
+
+ if (pid != filter_pid)
+ return 0;
+
+ real_regs = PT_REGS_SYSCALL_REGS(ctx);
+
+ /* test for PT_REGS_PARM */
+
+ bpf_probe_read_kernel(&tmp, sizeof(tmp), &PT_REGS_PARM1_SYSCALL(real_regs));
+ arg1 = tmp;
+ bpf_probe_read_kernel(&arg2, sizeof(arg2), &PT_REGS_PARM2_SYSCALL(real_regs));
+ bpf_probe_read_kernel(&arg3, sizeof(arg3), &PT_REGS_PARM3_SYSCALL(real_regs));
+ bpf_probe_read_kernel(&arg4_cx, sizeof(arg4_cx), &PT_REGS_PARM4(real_regs));
+ bpf_probe_read_kernel(&arg4, sizeof(arg4), &PT_REGS_PARM4_SYSCALL(real_regs));
+ bpf_probe_read_kernel(&arg5, sizeof(arg5), &PT_REGS_PARM5_SYSCALL(real_regs));
+
+ /* test for the CORE variant of PT_REGS_PARM */
+ arg1_core = PT_REGS_PARM1_CORE_SYSCALL(real_regs);
+ arg2_core = PT_REGS_PARM2_CORE_SYSCALL(real_regs);
+ arg3_core = PT_REGS_PARM3_CORE_SYSCALL(real_regs);
+ arg4_core_cx = PT_REGS_PARM4_CORE(real_regs);
+ arg4_core = PT_REGS_PARM4_CORE_SYSCALL(real_regs);
+ arg5_core = PT_REGS_PARM5_CORE_SYSCALL(real_regs);
+
+ return 0;
+}
+
+SEC("ksyscall/prctl")
+int BPF_KSYSCALL(prctl_enter, int option, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != filter_pid)
+ return 0;
+
+ option_syscall = option;
+ arg2_syscall = arg2;
+ arg3_syscall = arg3;
+ arg4_syscall = arg4;
+ arg5_syscall = arg5;
+ return 0;
+}
+
+__u64 splice_fd_in;
+__u64 splice_off_in;
+__u64 splice_fd_out;
+__u64 splice_off_out;
+__u64 splice_len;
+__u64 splice_flags;
+
+SEC("ksyscall/splice")
+int BPF_KSYSCALL(splice_enter, int fd_in, loff_t *off_in, int fd_out,
+ loff_t *off_out, size_t len, unsigned int flags)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != filter_pid)
+ return 0;
+
+ splice_fd_in = fd_in;
+ splice_off_in = (__u64)off_in;
+ splice_fd_out = fd_out;
+ splice_off_out = (__u64)off_out;
+ splice_len = len;
+ splice_flags = flags;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
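For context, a userspace harness for the programs above sets filter_pid before loading (it is const volatile, so it lives in .rodata and is frozen at load time) and then issues the syscalls to trigger the probes. A hedged sketch, assuming the skeleton name follows the file name:

#include <unistd.h>
#include <sys/prctl.h>
#include "bpf_syscall_macro.skel.h"	/* assumed skeleton name */

int run_syscall_macro_test(void)
{
	struct bpf_syscall_macro *skel;

	skel = bpf_syscall_macro__open();
	if (!skel)
		return 1;
	skel->rodata->filter_pid = getpid();	/* must be set before load */

	if (bpf_syscall_macro__load(skel) || bpf_syscall_macro__attach(skel))
		goto out;

	/* An invalid prctl option still enters the syscall, so the kprobe and
	 * ksyscall programs above record its arguments.
	 */
	prctl(-1, 0xdead, 0xbeef, 0, 0);
out:
	bpf_syscall_macro__destroy(skel);
	return 0;
}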
diff --git a/tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c b/tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c
new file mode 100644
index 000000000000..8a7a4c1b54e8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "X";
+
+SEC("struct_ops")
+void BPF_PROG(nogpltcp_init, struct sock *sk)
+{
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops bpf_nogpltcp = {
+ .init = (void *)nogpltcp_init,
+ .name = "bpf_nogpltcp",
+};
diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
index 01378911252b..17db400f0e0d 100644
--- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
+++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
@@ -2,14 +2,93 @@
#ifndef __BPF_TRACING_NET_H__
#define __BPF_TRACING_NET_H__
+#include <vmlinux.h>
+#include <bpf/bpf_core_read.h>
+
#define AF_INET 2
#define AF_INET6 10
+#define SOL_SOCKET 1
+#define SO_REUSEADDR 2
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+#define SO_KEEPALIVE 9
+#define SO_PRIORITY 12
+#define SO_REUSEPORT 15
+#if defined(__TARGET_ARCH_powerpc)
+#define SO_RCVLOWAT 16
+#else
+#define SO_RCVLOWAT 18
+#endif
+#define SO_BINDTODEVICE 25
+#define SO_MARK 36
+#define SO_MAX_PACING_RATE 47
+#define SO_BINDTOIFINDEX 62
+#define SO_TXREHASH 74
+#define __SO_ACCEPTCON (1 << 16)
+
+#define IP_TOS 1
+
+#define SOL_IPV6 41
+#define IPV6_TCLASS 67
+#define IPV6_AUTOFLOWLABEL 70
+
+#define TC_ACT_UNSPEC (-1)
+#define TC_ACT_OK 0
+#define TC_ACT_SHOT 2
+
+#define SOL_TCP 6
+#define TCP_NODELAY 1
+#define TCP_MAXSEG 2
+#define TCP_KEEPIDLE 4
+#define TCP_KEEPINTVL 5
+#define TCP_KEEPCNT 6
+#define TCP_SYNCNT 7
+#define TCP_WINDOW_CLAMP 10
+#define TCP_CONGESTION 13
+#define TCP_THIN_LINEAR_TIMEOUTS 16
+#define TCP_USER_TIMEOUT 18
+#define TCP_NOTSENT_LOWAT 25
+#define TCP_SAVE_SYN 27
+#define TCP_SAVED_SYN 28
+#define TCP_CA_NAME_MAX 16
+#define TCP_NAGLE_OFF 1
+#define TCP_RTO_MAX_MS 44
+
+#define TCP_ECN_OK 1
+#define TCP_ECN_QUEUE_CWR 2
+#define TCP_ECN_DEMAND_CWR 4
+#define TCP_ECN_SEEN 8
+
+#define TCP_CONG_NEEDS_ECN 0x2
+
#define ICSK_TIME_RETRANS 1
#define ICSK_TIME_PROBE0 3
#define ICSK_TIME_LOSS_PROBE 5
#define ICSK_TIME_REO_TIMEOUT 6
+#define ETH_ALEN 6
+#define ETH_HLEN 14
+#define ETH_P_IP 0x0800
+#define ETH_P_IPV6 0x86DD
+
+#define NEXTHDR_TCP 6
+
+#define TCPOPT_NOP 1
+#define TCPOPT_EOL 0
+#define TCPOPT_MSS 2
+#define TCPOPT_WINDOW 3
+#define TCPOPT_TIMESTAMP 8
+#define TCPOPT_SACK_PERM 4
+
+#define TCPOLEN_MSS 4
+#define TCPOLEN_WINDOW 3
+#define TCPOLEN_TIMESTAMP 10
+#define TCPOLEN_SACK_PERM 2
+
+#define CHECKSUM_NONE 0
+#define CHECKSUM_PARTIAL 3
+
#define IFNAMSIZ 16
#define RTF_GATEWAY 0x0002
@@ -17,6 +96,14 @@
#define TCP_INFINITE_SSTHRESH 0x7fffffff
#define TCP_PINGPONG_THRESH 3
+#define FLAG_DATA_ACKED 0x04 /* This ACK acknowledged new data. */
+#define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */
+#define FLAG_DATA_SACKED 0x20 /* New SACK. */
+#define FLAG_SND_UNA_ADVANCED \
+ 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
+#define FLAG_ACKED (FLAG_DATA_ACKED | FLAG_SYN_ACKED)
+#define FLAG_FORWARD_PROGRESS (FLAG_ACKED | FLAG_DATA_SACKED)
+
#define fib_nh_dev nh_common.nhc_dev
#define fib_nh_gw_family nh_common.nhc_gw_family
#define fib_nh_gw6 nh_common.nhc_gw.ipv6
@@ -25,6 +112,8 @@
#define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr
#define inet_dport sk.__sk_common.skc_dport
+#define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1]
+
#define ir_loc_addr req.__req_common.skc_rcv_saddr
#define ir_num req.__req_common.skc_num
#define ir_rmt_addr req.__req_common.skc_daddr
@@ -32,12 +121,19 @@
#define ir_v6_rmt_addr req.__req_common.skc_v6_daddr
#define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr
+#define sk_num __sk_common.skc_num
+#define sk_dport __sk_common.skc_dport
#define sk_family __sk_common.skc_family
#define sk_rmem_alloc sk_backlog.rmem_alloc
#define sk_refcnt __sk_common.skc_refcnt
#define sk_state __sk_common.skc_state
+#define sk_net __sk_common.skc_net
+#define sk_rcv_saddr __sk_common.skc_rcv_saddr
#define sk_v6_daddr __sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
+#define sk_flags __sk_common.skc_flags
+#define sk_reuse __sk_common.skc_reuse
+#define sk_cookie __sk_common.skc_cookie
#define s6_addr32 in6_u.u6_addr32
@@ -48,4 +144,37 @@
#define tw_v6_daddr __tw_common.skc_v6_daddr
#define tw_v6_rcv_saddr __tw_common.skc_v6_rcv_saddr
+#define tcp_jiffies32 ((__u32)bpf_jiffies64())
+
+static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
+{
+ return (struct inet_connection_sock *)sk;
+}
+
+static inline void *inet_csk_ca(const struct sock *sk)
+{
+ return (void *)inet_csk(sk)->icsk_ca_priv;
+}
+
+static inline struct tcp_sock *tcp_sk(const struct sock *sk)
+{
+ return (struct tcp_sock *)sk;
+}
+
+static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
+{
+ return tp->snd_cwnd < tp->snd_ssthresh;
+}
+
+static inline bool tcp_is_cwnd_limited(const struct sock *sk)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+ /* If in slow start, ensure cwnd grows to twice what was ACKed. */
+ if (tcp_in_slow_start(tp))
+ return tp->snd_cwnd < 2 * tp->max_packets_out;
+
+ return !!BPF_CORE_READ_BITFIELD(tp, is_cwnd_limited);
+}
+
#endif
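The tcp_sk()/tcp_in_slow_start()/tcp_is_cwnd_limited() helpers added above mirror their kernel counterparts so BPF congestion-control programs can keep the usual idioms. A minimal sketch of how a callback might use them; the program name is made up, and the fragment would only load as part of a tcp_congestion_ops struct_ops map:

#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("struct_ops")
void BPF_PROG(example_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Only grow cwnd when the flow is actually limited by it. */
	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp))
		tp->snd_cwnd += acked;	/* classic slow-start growth */
}

char _license[] SEC("license") = "GPL";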
diff --git a/tools/testing/selftests/bpf/progs/bprm_opts.c b/tools/testing/selftests/bpf/progs/bprm_opts.c
new file mode 100644
index 000000000000..418d9c6d4952
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bprm_opts.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2020 Google LLC.
+ */
+
+#include <linux/bpf.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} secure_exec_task_map SEC(".maps");
+
+SEC("lsm/bprm_creds_for_exec")
+int BPF_PROG(secure_exec, struct linux_binprm *bprm)
+{
+ int *secureexec;
+
+ secureexec = bpf_task_storage_get(&secure_exec_task_map,
+ bpf_get_current_task_btf(), 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+
+ if (secureexec && *secureexec)
+ bpf_bprm_opts_set(bprm, BPF_F_BPRM_SECUREEXEC);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_signed_arr_elem_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_signed_arr_elem_sz.c
new file mode 100644
index 000000000000..21a560427b10
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_signed_arr_elem_sz.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_arrays___err_bad_signed_arr_elem_sz x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val.c
new file mode 100644
index 000000000000..888e79db6a77
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enum64val x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___diff.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___diff.c
new file mode 100644
index 000000000000..194749130d87
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___diff.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enum64val___diff x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___err_missing.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___err_missing.c
new file mode 100644
index 000000000000..3d732d4193e4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___err_missing.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enum64val___err_missing x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___val3_missing.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___val3_missing.c
new file mode 100644
index 000000000000..17cf5d6a848d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___val3_missing.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enum64val___val3_missing x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
deleted file mode 100644
index dd0ffa518f36..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_arr_kind x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
deleted file mode 100644
index bc83372088ad..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_arr_value_type x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
deleted file mode 100644
index 917bec41be08..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_int_kind x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
deleted file mode 100644
index 6ec7e6ec1c91..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_int_sz x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
deleted file mode 100644
index 7bbcacf2b0d1..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_int_type x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
deleted file mode 100644
index f384dd38ec70..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_struct_type x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
new file mode 100644
index 000000000000..d14b496190c3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_existence___wrong_field_defs x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_offs.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_offs.c
new file mode 100644
index 000000000000..3824345d82ab
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_offs.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_size___diff_offs x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff.c
new file mode 100644
index 000000000000..57ae2c258928
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_type_based___diff x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c
index 8f44767a75fa..e01690618e1e 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c
@@ -11,7 +11,7 @@
/*
*struct bitfields_only_mixed_types {
* int a: 3;
- * long int b: 2;
+ * long b: 2;
* _Bool c: 1;
* enum {
* A = 0,
@@ -27,7 +27,7 @@
struct bitfields_only_mixed_types {
int a: 3;
- long int b: 2;
+ long b: 2;
bool c: 1; /* it's really a _Bool type */
enum {
A, /* A = 0, dumper is very explicit */
@@ -44,8 +44,8 @@ struct bitfields_only_mixed_types {
* char: 4;
* int a: 4;
* short b;
- * long int c;
- * long int d: 8;
+ * long c;
+ * long d: 8;
* int e;
* int f;
*};
@@ -53,7 +53,7 @@ struct bitfields_only_mixed_types {
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct bitfield_mixed_with_others {
- long: 4; /* char is enough as a backing field */
+ char: 4; /* char is enough as a backing field */
int a: 4;
/* 8-bit implicit padding */
short b; /* combined with previous bitfield */
@@ -71,7 +71,7 @@ struct bitfield_mixed_with_others {
*struct bitfield_flushed {
* int a: 4;
* long: 60;
- * long int b: 16;
+ * long b: 16;
*};
*
*/
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c
index ba97165bdb28..a657651eba52 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c
@@ -14,9 +14,9 @@ typedef int *ptr_arr_t[6];
typedef int *ptr_multiarr_t[7][8][9][10];
-typedef int * (*fn_ptr_arr_t[11])();
+typedef int * (*fn_ptr_arr_t[11])(void);
-typedef int * (*fn_ptr_multiarr_t[12][13])();
+typedef int * (*fn_ptr_multiarr_t[12][13])(void);
struct root_struct {
arr_t _1;
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c
index 1cef3bec1dc7..7998f27df7dd 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c
@@ -29,7 +29,7 @@ struct non_packed_fields {
struct nested_packed {
char: 4;
int a: 4;
- long int b;
+ long b;
struct {
char c;
int d;
@@ -44,7 +44,7 @@ union union_is_never_packed {
union union_does_not_need_packing {
struct {
- long int a;
+ long a;
int b;
} __attribute__((packed));
int c;
@@ -58,7 +58,81 @@ union jump_code_union {
} __attribute__((packed));
};
-/*------ END-EXPECTED-OUTPUT ------ */
+/* ----- START-EXPECTED-OUTPUT ----- */
+/*
+ *struct nested_packed_but_aligned_struct {
+ * int x1;
+ * int x2;
+ *};
+ *
+ *struct outer_implicitly_packed_struct {
+ * char y1;
+ * struct nested_packed_but_aligned_struct y2;
+ *} __attribute__((packed));
+ *
+ */
+/* ------ END-EXPECTED-OUTPUT ------ */
+
+struct nested_packed_but_aligned_struct {
+ int x1;
+ int x2;
+} __attribute__((packed));
+
+struct outer_implicitly_packed_struct {
+ char y1;
+ struct nested_packed_but_aligned_struct y2;
+};
+/* ----- START-EXPECTED-OUTPUT ----- */
+/*
+ *struct usb_ss_ep_comp_descriptor {
+ * char: 8;
+ * char bDescriptorType;
+ * char bMaxBurst;
+ * short wBytesPerInterval;
+ *};
+ *
+ *struct usb_host_endpoint {
+ * long: 64;
+ * char: 8;
+ * struct usb_ss_ep_comp_descriptor ss_ep_comp;
+ * long: 0;
+ *} __attribute__((packed));
+ *
+ */
+/* ------ END-EXPECTED-OUTPUT ------ */
+
+struct usb_ss_ep_comp_descriptor {
+ char: 8;
+ char bDescriptorType;
+ char bMaxBurst;
+ int: 0;
+ short wBytesPerInterval;
+} __attribute__((packed));
+
+struct usb_host_endpoint {
+ long: 64;
+ char: 8;
+ struct usb_ss_ep_comp_descriptor ss_ep_comp;
+ long: 0;
+};
+
+/* ----- START-EXPECTED-OUTPUT ----- */
+struct nested_packed_struct {
+ int a;
+ char b;
+} __attribute__((packed));
+
+struct outer_nonpacked_struct {
+ short a;
+ struct nested_packed_struct b;
+};
+
+struct outer_packed_struct {
+ short a;
+ struct nested_packed_struct b;
+} __attribute__((packed));
+
+/* ------ END-EXPECTED-OUTPUT ------ */
int f(struct {
struct packed_trailing_space _1;
@@ -69,6 +143,10 @@ int f(struct {
union union_is_never_packed _6;
union union_does_not_need_packing _7;
union jump_code_union _8;
+ struct outer_implicitly_packed_struct _9;
+ struct usb_host_endpoint _10;
+ struct outer_nonpacked_struct _11;
+ struct outer_packed_struct _12;
} *_)
{
return 0;
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
index 35c512818a56..79276fbe454a 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
@@ -9,7 +9,7 @@
/* ----- START-EXPECTED-OUTPUT ----- */
struct padded_implicitly {
int a;
- long int b;
+ long b;
char c;
};
@@ -19,7 +19,7 @@ struct padded_implicitly {
/*
*struct padded_explicitly {
* int a;
- * int: 32;
+ * long: 0;
* int b;
*};
*
@@ -28,41 +28,28 @@ struct padded_implicitly {
struct padded_explicitly {
int a;
- int: 1; /* algo will explicitly pad with full 32 bits here */
+ int: 1; /* algo will emit aligning `long: 0;` here */
int b;
};
/* ----- START-EXPECTED-OUTPUT ----- */
-/*
- *struct padded_a_lot {
- * int a;
- * long: 32;
- * long: 64;
- * long: 64;
- * int b;
- *};
- *
- */
-/* ------ END-EXPECTED-OUTPUT ------ */
-
struct padded_a_lot {
int a;
- /* 32 bit of implicit padding here, which algo will make explicit */
long: 64;
long: 64;
int b;
};
+/* ------ END-EXPECTED-OUTPUT ------ */
+
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct padded_cache_line {
* int a;
- * long: 32;
* long: 64;
* long: 64;
* long: 64;
* int b;
- * long: 32;
* long: 64;
* long: 64;
* long: 64;
@@ -85,7 +72,7 @@ struct padded_cache_line {
*struct zone {
* int a;
* short b;
- * short: 16;
+ * long: 0;
* struct zone_padding __pad__;
*};
*
@@ -102,12 +89,160 @@ struct zone {
struct zone_padding __pad__;
};
+/* ----- START-EXPECTED-OUTPUT ----- */
+struct padding_wo_named_members {
+ long: 64;
+ long: 64;
+};
+
+struct padding_weird_1 {
+ int a;
+ long: 64;
+ short: 16;
+ short b;
+};
+
+/* ------ END-EXPECTED-OUTPUT ------ */
+
+/* ----- START-EXPECTED-OUTPUT ----- */
+/*
+ *struct padding_weird_2 {
+ * long: 56;
+ * char a;
+ * long: 56;
+ * char b;
+ * char: 8;
+ *};
+ *
+ */
+/* ------ END-EXPECTED-OUTPUT ------ */
+struct padding_weird_2 {
+ int: 32; /* these paddings will be collapsed into `long: 56;` */
+ short: 16;
+ char: 8;
+ char a;
+ int: 32; /* these paddings will be collapsed into `long: 56;` */
+ short: 16;
+ char: 8;
+ char b;
+ char: 8;
+};
+
+/* ----- START-EXPECTED-OUTPUT ----- */
+struct exact_1byte {
+ char x;
+};
+
+struct padded_1byte {
+ char: 8;
+};
+
+struct exact_2bytes {
+ short x;
+};
+
+struct padded_2bytes {
+ short: 16;
+};
+
+struct exact_4bytes {
+ int x;
+};
+
+struct padded_4bytes {
+ int: 32;
+};
+
+struct exact_8bytes {
+ long x;
+};
+
+struct padded_8bytes {
+ long: 64;
+};
+
+struct ff_periodic_effect {
+ int: 32;
+ short magnitude;
+ long: 0;
+ short phase;
+ long: 0;
+ int: 32;
+ int custom_len;
+ short *custom_data;
+};
+
+struct ib_wc {
+ long: 64;
+ long: 64;
+ int: 32;
+ int byte_len;
+ void *qp;
+ union {} ex;
+ long: 64;
+ int slid;
+ int wc_flags;
+ long: 64;
+ char smac[6];
+ long: 0;
+ char network_hdr_type;
+};
+
+struct acpi_object_method {
+ long: 64;
+ char: 8;
+ char type;
+ short reference_count;
+ char flags;
+ short: 0;
+ char: 8;
+ char sync_level;
+ long: 64;
+ void *node;
+ void *aml_start;
+ union {} dispatch;
+ long: 64;
+ int aml_length;
+};
+
+struct nested_unpacked {
+ int x;
+};
+
+struct nested_packed {
+ struct nested_unpacked a;
+ char c;
+} __attribute__((packed));
+
+struct outer_mixed_but_unpacked {
+ struct nested_packed b1;
+ short a1;
+ struct nested_packed b2;
+};
+
+/* ------ END-EXPECTED-OUTPUT ------ */
+
int f(struct {
struct padded_implicitly _1;
struct padded_explicitly _2;
struct padded_a_lot _3;
struct padded_cache_line _4;
struct zone _5;
+ struct padding_wo_named_members _6;
+ struct padding_weird_1 _7;
+ struct padding_weird_2 _8;
+ struct exact_1byte _100;
+ struct padded_1byte _101;
+ struct exact_2bytes _102;
+ struct padded_2bytes _103;
+ struct exact_4bytes _104;
+ struct padded_4bytes _105;
+ struct exact_8bytes _106;
+ struct padded_8bytes _107;
+ struct ff_periodic_effect _200;
+ struct ib_wc _201;
+ struct acpi_object_method _202;
+ struct outer_mixed_but_unpacked _203;
} *_)
{
return 0;
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
index 31975c96e2c9..29d01fff32bd 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
@@ -25,6 +25,39 @@ typedef enum {
H = 2,
} e3_t;
+/* ----- START-EXPECTED-OUTPUT ----- */
+/*
+ *enum e_byte {
+ * EBYTE_1 = 0,
+ * EBYTE_2 = 1,
+ *} __attribute__((mode(byte)));
+ *
+ */
+/* ----- END-EXPECTED-OUTPUT ----- */
+enum e_byte {
+ EBYTE_1,
+ EBYTE_2,
+} __attribute__((mode(byte)));
+
+/* ----- START-EXPECTED-OUTPUT ----- */
+/*
+ *enum e_word {
+ * EWORD_1 = 0LL,
+ * EWORD_2 = 1LL,
+ *} __attribute__((mode(word)));
+ *
+ */
+/* ----- END-EXPECTED-OUTPUT ----- */
+enum e_word {
+ EWORD_1,
+ EWORD_2,
+} __attribute__((mode(word))); /* force to use 8-byte backing for this enum */
+
+/* ----- START-EXPECTED-OUTPUT ----- */
+enum e_big {
+ EBIG_1 = 1000000000000ULL,
+};
+
typedef int int_t;
typedef volatile const int * volatile const crazy_ptr_t;
@@ -51,7 +84,7 @@ typedef void (*printf_fn_t)(const char *, ...);
* typedef int (*fn_t)(int);
* typedef char * const * (*fn_ptr2_t)(s_t, fn_t);
*
- * - `fn_complext_t`: pointer to a function returning struct and accepting
+ * - `fn_complex_t`: pointer to a function returning struct and accepting
* union and struct. All structs and enum are anonymous and defined inline.
*
* - `signal_t: pointer to a function accepting a pointer to a function as an
@@ -67,7 +100,7 @@ typedef void (*printf_fn_t)(const char *, ...);
* `int -> char *` function and returns pointer to a char. Equivalent:
* typedef char * (*fn_input_t)(int);
* typedef char * (*fn_output_outer_t)(fn_input_t);
- * typedef const fn_output_outer_t (* fn_output_inner_t)();
+ * typedef const fn_output_outer_t (* fn_output_inner_t)(void);
* typedef const fn_output_inner_t fn_ptr_arr2_t[5];
*/
/* ----- START-EXPECTED-OUTPUT ----- */
@@ -94,7 +127,7 @@ typedef void (* (*signal_t)(int, void (*)(int)))(int);
typedef char * (*fn_ptr_arr1_t[10])(int **);
-typedef char * (* const (* const fn_ptr_arr2_t[5])())(char * (*)(int));
+typedef char * (* (* const fn_ptr_arr2_t[5])(void))(char * (*)(int));
struct struct_w_typedefs {
int_t a;
@@ -174,6 +207,12 @@ struct struct_in_struct {
};
};
+struct struct_in_array {};
+
+struct struct_in_array_typed {};
+
+typedef struct struct_in_array_typed struct_in_array_t[2];
+
struct struct_with_embedded_stuff {
int a;
struct {
@@ -183,7 +222,7 @@ struct struct_with_embedded_stuff {
const char *d;
} e;
union {
- volatile long int f;
+ volatile long f;
void * restrict g;
};
};
@@ -203,6 +242,14 @@ struct struct_with_embedded_stuff {
} r[5];
struct struct_in_struct s[10];
int t[11];
+ struct struct_in_array (*u)[2];
+ struct_in_array_t *v;
+};
+
+struct float_struct {
+ float f;
+ const double *d;
+ volatile long double *ld;
};
struct root_struct {
@@ -210,6 +257,9 @@ struct root_struct {
enum e2 _2;
e2_t _2_1;
e3_t _2_2;
+ enum e_byte _100;
+ enum e_word _101;
+ enum e_big _102;
struct struct_w_typedefs _3;
anon_struct_t _7;
struct struct_fwd *_8;
@@ -219,6 +269,7 @@ struct root_struct {
union_fwd_t *_12;
union_fwd_ptr_t _13;
struct struct_with_embedded_stuff _14;
+ struct float_struct _15;
};
/* ------ END-EXPECTED-OUTPUT ------ */
diff --git a/tools/testing/selftests/bpf/progs/btf_type_tag.c b/tools/testing/selftests/bpf/progs/btf_type_tag.c
new file mode 100644
index 000000000000..1d488da7e920
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf_type_tag.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#if __has_attribute(btf_type_tag)
+#define __tag1 __attribute__((btf_type_tag("tag1")))
+#define __tag2 __attribute__((btf_type_tag("tag2")))
+volatile const bool skip_tests = false;
+#else
+#define __tag1
+#define __tag2
+volatile const bool skip_tests = true;
+#endif
+
+struct btf_type_tag_test {
+ int __tag1 * __tag1 __tag2 *p;
+} g;
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(sub, int x)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c
new file mode 100644
index 000000000000..d93f68024cc6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Google */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct bpf_testmod_btf_type_tag_1 {
+ int a;
+};
+
+struct bpf_testmod_btf_type_tag_2 {
+ struct bpf_testmod_btf_type_tag_1 *p;
+};
+
+__u64 g;
+
+SEC("fentry/bpf_testmod_test_btf_type_tag_percpu_1")
+int BPF_PROG(test_percpu1, struct bpf_testmod_btf_type_tag_1 *arg)
+{
+ g = arg->a;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_btf_type_tag_percpu_2")
+int BPF_PROG(test_percpu2, struct bpf_testmod_btf_type_tag_2 *arg)
+{
+ g = arg->p->a;
+ return 0;
+}
+
+/* trace_cgroup_mkdir(struct cgroup *cgrp, const char *path)
+ *
+ * struct css_rstat_cpu {
+ * ...
+ * struct cgroup_subsys_state *updated_children;
+ * ...
+ * };
+ *
+ * struct cgroup_subsys_state {
+ * ...
+ * struct css_rstat_cpu __percpu *rstat_cpu;
+ * ...
+ * };
+ *
+ * struct cgroup {
+ * struct cgroup_subsys_state self;
+ * ...
+ * };
+ */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_percpu_load, struct cgroup *cgrp, const char *path)
+{
+ g = (__u64)cgrp->self.rstat_cpu->updated_children;
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_percpu_helper, struct cgroup *cgrp, const char *path)
+{
+ struct css_rstat_cpu *rstat;
+ __u32 cpu;
+
+ cpu = bpf_get_smp_processor_id();
+ rstat = (struct css_rstat_cpu *)bpf_per_cpu_ptr(
+ cgrp->self.rstat_cpu, cpu);
+ if (rstat) {
+ /* READ_ONCE */
+ *(volatile long *)rstat;
+ }
+
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/btf_type_tag_user.c b/tools/testing/selftests/bpf/progs/btf_type_tag_user.c
new file mode 100644
index 000000000000..5523f77c5a44
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf_type_tag_user.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct bpf_testmod_btf_type_tag_1 {
+ int a;
+};
+
+struct bpf_testmod_btf_type_tag_2 {
+ struct bpf_testmod_btf_type_tag_1 *p;
+};
+
+int g;
+
+SEC("fentry/bpf_testmod_test_btf_type_tag_user_1")
+int BPF_PROG(test_user1, struct bpf_testmod_btf_type_tag_1 *arg)
+{
+ g = arg->a;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_btf_type_tag_user_2")
+int BPF_PROG(test_user2, struct bpf_testmod_btf_type_tag_2 *arg)
+{
+ g = arg->p->a;
+ return 0;
+}
+
+/* int __sys_getsockname(int fd, struct sockaddr __user *usockaddr,
+ * int __user *usockaddr_len);
+ */
+SEC("fentry/__sys_getsockname")
+int BPF_PROG(test_sys_getsockname, int fd, struct sockaddr *usockaddr,
+ int *usockaddr_len)
+{
+ g = usockaddr->sa_family;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cb_refs.c b/tools/testing/selftests/bpf/progs/cb_refs.c
new file mode 100644
index 000000000000..5d6fc7f01ebb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cb_refs.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+struct map_value {
+ struct prog_test_ref_kfunc __kptr *ptr;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 16);
+} array_map SEC(".maps");
+
+static __noinline int cb1(void *map, void *key, void *value, void *ctx)
+{
+ void *p = *(void **)ctx;
+ bpf_kfunc_call_test_release(p);
+ /* Without the fix this would cause underflow */
+ return 0;
+}
+
+SEC("?tc")
+int underflow_prog(void *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ unsigned long sl = 0;
+
+ p = bpf_kfunc_call_test_acquire(&sl);
+ if (!p)
+ return 0;
+ bpf_for_each_map_elem(&array_map, cb1, &p, 0);
+ bpf_kfunc_call_test_release(p);
+ return 0;
+}
+
+static __always_inline int cb2(void *map, void *key, void *value, void *ctx)
+{
+ unsigned long sl = 0;
+
+ *(void **)ctx = bpf_kfunc_call_test_acquire(&sl);
+ /* Without the fix this would leak memory */
+ return 0;
+}
+
+SEC("?tc")
+int leak_prog(void *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+
+ v = bpf_map_lookup_elem(&array_map, &(int){0});
+ if (!v)
+ return 0;
+
+ p = NULL;
+ bpf_for_each_map_elem(&array_map, cb2, &p, 0);
+ p = bpf_kptr_xchg(&v->ptr, p);
+ if (p)
+ bpf_kfunc_call_test_release(p);
+ return 0;
+}
+
+static __always_inline int cb(void *map, void *key, void *value, void *ctx)
+{
+ return 0;
+}
+
+static __always_inline int cb3(void *map, void *key, void *value, void *ctx)
+{
+ unsigned long sl = 0;
+ void *p;
+
+ bpf_kfunc_call_test_acquire(&sl);
+ bpf_for_each_map_elem(&array_map, cb, &p, 0);
+ /* It should only complain here, not in cb. This is why we need
+ * callback_ref to be set to frameno.
+ */
+ return 0;
+}
+
+SEC("?tc")
+int nested_cb(void *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ unsigned long sl = 0;
+ int sp = 0;
+
+ p = bpf_kfunc_call_test_acquire(&sl);
+ if (!p)
+ return 0;
+ bpf_for_each_map_elem(&array_map, cb3, &sp, 0);
+ bpf_kfunc_call_test_release(p);
+ return 0;
+}
+
+SEC("?tc")
+int non_cb_transfer_ref(void *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ unsigned long sl = 0;
+
+ p = bpf_kfunc_call_test_acquire(&sl);
+ if (!p)
+ return 0;
+ cb1(NULL, NULL, NULL, &p);
+ bpf_kfunc_call_test_acquire(&sl);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi.h b/tools/testing/selftests/bpf/progs/cg_storage_multi.h
index a0778fe7857a..41d59f0ee606 100644
--- a/tools/testing/selftests/bpf/progs/cg_storage_multi.h
+++ b/tools/testing/selftests/bpf/progs/cg_storage_multi.h
@@ -3,8 +3,6 @@
#ifndef __PROGS_CG_STORAGE_MULTI_H
#define __PROGS_CG_STORAGE_MULTI_H
-#include <asm/types.h>
-
struct cgroup_value {
__u32 egress_pkts;
__u32 ingress_pkts;
diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c b/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c
index a25373002055..3f81ff92184c 100644
--- a/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c
+++ b/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c
@@ -20,7 +20,7 @@ struct {
__u32 invocations = 0;
-SEC("cgroup_skb/egress/1")
+SEC("cgroup_skb/egress")
int egress1(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
@@ -32,7 +32,7 @@ int egress1(struct __sk_buff *skb)
return 1;
}
-SEC("cgroup_skb/egress/2")
+SEC("cgroup_skb/egress")
int egress2(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c b/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c
index a149f33bc533..d662db27fe4a 100644
--- a/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c
+++ b/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c
@@ -20,7 +20,7 @@ struct {
__u32 invocations = 0;
-SEC("cgroup_skb/egress/1")
+SEC("cgroup_skb/egress")
int egress1(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
@@ -32,7 +32,7 @@ int egress1(struct __sk_buff *skb)
return 1;
}
-SEC("cgroup_skb/egress/2")
+SEC("cgroup_skb/egress")
int egress2(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
diff --git a/tools/testing/selftests/bpf/progs/cgroup_ancestor.c b/tools/testing/selftests/bpf/progs/cgroup_ancestor.c
new file mode 100644
index 000000000000..8c2deb4fc493
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_ancestor.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_tracing_net.h"
+#define NUM_CGROUP_LEVELS 4
+
+__u64 cgroup_ids[NUM_CGROUP_LEVELS];
+__u16 dport;
+
+static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level)
+{
+ /* [1] &level is passed to an external function that may change it,
+ * which is incompatible with loop unrolling.
+ */

+ cgroup_ids[level] = bpf_skb_ancestor_cgroup_id(skb, level);
+}
+
+SEC("tc")
+int log_cgroup_id(struct __sk_buff *skb)
+{
+ struct sock *sk = (void *)skb->sk;
+
+ if (!sk)
+ return TC_ACT_OK;
+
+ sk = bpf_core_cast(sk, struct sock);
+ if (sk->sk_protocol == IPPROTO_UDP && sk->sk_dport == dport) {
+ log_nth_level(skb, 0);
+ log_nth_level(skb, 1);
+ log_nth_level(skb, 2);
+ log_nth_level(skb, 3);
+ }
+
+ return TC_ACT_OK;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c
new file mode 100644
index 000000000000..932b8ecd4ae3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright 2021 Google LLC.
+ */
+
+#include <errno.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+__u32 invocations = 0;
+__u32 assertion_error = 0;
+__u32 retval_value = 0;
+__u32 ctx_retval_value = 0;
+__u32 page_size = 0;
+
+SEC("cgroup/getsockopt")
+int get_retval(struct bpf_sockopt *ctx)
+{
+ retval_value = bpf_get_retval();
+ ctx_retval_value = ctx->retval;
+ __sync_fetch_and_add(&invocations, 1);
+
+ /* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int set_eisconn(struct bpf_sockopt *ctx)
+{
+ __sync_fetch_and_add(&invocations, 1);
+
+ if (bpf_set_retval(-EISCONN))
+ assertion_error = 1;
+
+ /* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int clear_retval(struct bpf_sockopt *ctx)
+{
+ __sync_fetch_and_add(&invocations, 1);
+
+ ctx->retval = 0;
+
+ /* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_hooks.c b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_hooks.c
new file mode 100644
index 000000000000..13dfb4bbfd28
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_hooks.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+#define BPF_RETVAL_HOOK(name, section, ctx, expected_err) \
+ __attribute__((__section__("?" section))) \
+ int name(struct ctx *_ctx) \
+ { \
+ bpf_set_retval(bpf_get_retval()); \
+ return 1; \
+ }
+
+#include "cgroup_getset_retval_hooks.h"
+
+#undef BPF_RETVAL_HOOK
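The file above is an X-macro expander: every BPF_RETVAL_HOOK(name, section, ctx, expected_err) entry in cgroup_getset_retval_hooks.h (not shown in this hunk) becomes one optional program. As an illustration only, here is a hypothetical entry and its approximate expansion:

/* Hypothetical entry, for illustration:
 *
 *	BPF_RETVAL_HOOK(skb_ingress, "cgroup_skb/ingress", __sk_buff, -EINVAL)
 *
 * expands to approximately:
 */
__attribute__((__section__("?cgroup_skb/ingress")))
int skb_ingress(struct __sk_buff *_ctx)
{
	bpf_set_retval(bpf_get_retval());
	return 1;
}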
diff --git a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c
new file mode 100644
index 000000000000..45a0e9f492a9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright 2021 Google LLC.
+ */
+
+#include <errno.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+__u32 invocations = 0;
+__u32 assertion_error = 0;
+__u32 retval_value = 0;
+__s32 page_size = 0;
+
+SEC("cgroup/setsockopt")
+int get_retval(struct bpf_sockopt *ctx)
+{
+ retval_value = bpf_get_retval();
+ __sync_fetch_and_add(&invocations, 1);
+
+ /* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
+ return 1;
+}
+
+SEC("cgroup/setsockopt")
+int set_eunatch(struct bpf_sockopt *ctx)
+{
+ __sync_fetch_and_add(&invocations, 1);
+
+ if (bpf_set_retval(-EUNATCH))
+ assertion_error = 1;
+
+ /* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
+ return 0;
+}
+
+SEC("cgroup/setsockopt")
+int set_eisconn(struct bpf_sockopt *ctx)
+{
+ __sync_fetch_and_add(&invocations, 1);
+
+ if (bpf_set_retval(-EISCONN))
+ assertion_error = 1;
+
+ /* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
+ return 0;
+}
+
+SEC("cgroup/setsockopt")
+int legacy_eperm(struct bpf_sockopt *ctx)
+{
+ __sync_fetch_and_add(&invocations, 1);
+
+ /* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c b/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c
new file mode 100644
index 000000000000..ff189a736ad8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022 Google LLC.
+ */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct percpu_attach_counter {
+ /* Previous percpu state, to figure out if we have new updates */
+ __u64 prev;
+ /* Current percpu state */
+ __u64 state;
+};
+
+struct attach_counter {
+ /* State propagated through children, pending aggregation */
+ __u64 pending;
+ /* Total state, including all cpus and all children */
+ __u64 state;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(max_entries, 1024);
+ __type(key, __u64);
+ __type(value, struct percpu_attach_counter);
+} percpu_attach_counters SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1024);
+ __type(key, __u64);
+ __type(value, struct attach_counter);
+} attach_counters SEC(".maps");
+
+extern void css_rstat_updated(
+ struct cgroup_subsys_state *css, int cpu) __ksym;
+extern void css_rstat_flush(struct cgroup_subsys_state *css) __ksym;
+
+static uint64_t cgroup_id(struct cgroup *cgrp)
+{
+ return cgrp->kn->id;
+}
+
+static int create_percpu_attach_counter(__u64 cg_id, __u64 state)
+{
+ struct percpu_attach_counter pcpu_init = {.state = state, .prev = 0};
+
+ return bpf_map_update_elem(&percpu_attach_counters, &cg_id,
+ &pcpu_init, BPF_NOEXIST);
+}
+
+static int create_attach_counter(__u64 cg_id, __u64 state, __u64 pending)
+{
+ struct attach_counter init = {.state = state, .pending = pending};
+
+ return bpf_map_update_elem(&attach_counters, &cg_id,
+ &init, BPF_NOEXIST);
+}
+
+SEC("fentry/cgroup_attach_task")
+int BPF_PROG(counter, struct cgroup *dst_cgrp, struct task_struct *leader,
+ bool threadgroup)
+{
+ __u64 cg_id = cgroup_id(dst_cgrp);
+ struct percpu_attach_counter *pcpu_counter = bpf_map_lookup_elem(
+ &percpu_attach_counters,
+ &cg_id);
+
+ if (pcpu_counter)
+ pcpu_counter->state += 1;
+ else if (create_percpu_attach_counter(cg_id, 1))
+ return 0;
+
+ css_rstat_updated(&dst_cgrp->self, bpf_get_smp_processor_id());
+ return 0;
+}
+
+SEC("fentry/bpf_rstat_flush")
+int BPF_PROG(flusher, struct cgroup *cgrp, struct cgroup *parent, int cpu)
+{
+ struct percpu_attach_counter *pcpu_counter;
+ struct attach_counter *total_counter, *parent_counter;
+ __u64 cg_id = cgroup_id(cgrp);
+ __u64 parent_cg_id = parent ? cgroup_id(parent) : 0;
+ __u64 state;
+ __u64 delta = 0;
+
+ /* Add CPU changes on this level since the last flush */
+ pcpu_counter = bpf_map_lookup_percpu_elem(&percpu_attach_counters,
+ &cg_id, cpu);
+ if (pcpu_counter) {
+ state = pcpu_counter->state;
+ delta += state - pcpu_counter->prev;
+ pcpu_counter->prev = state;
+ }
+
+ total_counter = bpf_map_lookup_elem(&attach_counters, &cg_id);
+ if (!total_counter) {
+ if (create_attach_counter(cg_id, delta, 0))
+ return 0;
+ goto update_parent;
+ }
+
+ /* Collect pending stats from subtree */
+ if (total_counter->pending) {
+ delta += total_counter->pending;
+ total_counter->pending = 0;
+ }
+
+ /* Propagate changes to this cgroup's total */
+ total_counter->state += delta;
+
+update_parent:
+ /* Skip if there are no changes to propagate, or no parent */
+ if (!delta || !parent_cg_id)
+ return 0;
+
+ /* Propagate changes to cgroup's parent */
+ parent_counter = bpf_map_lookup_elem(&attach_counters,
+ &parent_cg_id);
+ if (parent_counter)
+ parent_counter->pending += delta;
+ else
+ create_attach_counter(parent_cg_id, 0, delta);
+ return 0;
+}
+
+SEC("iter.s/cgroup")
+int BPF_PROG(dumper, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+{
+ struct seq_file *seq = meta->seq;
+ struct attach_counter *total_counter;
+ __u64 cg_id = cgrp ? cgroup_id(cgrp) : 0;
+
+ /* Do nothing for the terminal call */
+ if (!cg_id)
+ return 1;
+
+ /* Flush the stats to make sure we get the most updated numbers */
+ css_rstat_flush(&cgrp->self);
+
+ total_counter = bpf_map_lookup_elem(&attach_counters, &cg_id);
+ if (!total_counter) {
+ BPF_SEQ_PRINTF(seq, "cg_id: %llu, attach_counter: 0\n",
+ cg_id);
+ } else {
+ BPF_SEQ_PRINTF(seq, "cg_id: %llu, attach_counter: %llu\n",
+ cg_id, total_counter->state);
+ }
+ return 0;
+}
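To read the dumper output, userspace attaches it as a cgroup iterator and reads the seq_file text from the iterator fd. A hedged sketch, assuming the skeleton name follows the file name and using a made-up cgroup path:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_hierarchical_stats.skel.h"	/* assumed skeleton name */

int dump_attach_counters(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo = {};
	struct cgroup_hierarchical_stats *skel;
	struct bpf_link *link;
	int cgrp_fd, iter_fd;
	char buf[256];
	ssize_t len;

	skel = cgroup_hierarchical_stats__open_and_load();
	if (!skel)
		return 1;
	/* attach the fentry collectors so there is data to dump */
	if (cgroup_hierarchical_stats__attach(skel))
		goto out;

	cgrp_fd = open("/sys/fs/cgroup/test", O_RDONLY);	/* example path */
	linfo.cgroup.cgroup_fd = cgrp_fd;
	linfo.cgroup.order = BPF_CGROUP_ITER_DESCENDANTS_PRE;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(skel->progs.dumper, &opts);
	if (!link)
		goto out;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, len, stdout);

	close(iter_fd);
	bpf_link__destroy(link);
	close(cgrp_fd);
out:
	cgroup_hierarchical_stats__destroy(skel);
	return 0;
}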
diff --git a/tools/testing/selftests/bpf/progs/cgroup_iter.c b/tools/testing/selftests/bpf/progs/cgroup_iter.c
new file mode 100644
index 000000000000..f30841997a8d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_iter.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Google */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+int terminate_early = 0;
+u64 terminal_cgroup = 0;
+
+static inline u64 cgroup_id(struct cgroup *cgrp)
+{
+ return cgrp->kn->id;
+}
+
+SEC("iter/cgroup")
+int cgroup_id_printer(struct bpf_iter__cgroup *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct cgroup *cgrp = ctx->cgroup;
+
+ /* epilogue */
+ if (cgrp == NULL) {
+ BPF_SEQ_PRINTF(seq, "epilogue\n");
+ return 0;
+ }
+
+ /* prologue */
+ if (ctx->meta->seq_num == 0)
+ BPF_SEQ_PRINTF(seq, "prologue\n");
+
+ BPF_SEQ_PRINTF(seq, "%8llu\n", cgroup_id(cgrp));
+
+ if (terminal_cgroup == cgroup_id(cgrp))
+ return 1;
+
+ return terminate_early ? 1 : 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgroup_mprog.c b/tools/testing/selftests/bpf/progs/cgroup_mprog.c
new file mode 100644
index 000000000000..6a0ea02c4de2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_mprog.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("cgroup/getsockopt")
+int getsockopt_1(struct bpf_sockopt *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int getsockopt_2(struct bpf_sockopt *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int getsockopt_3(struct bpf_sockopt *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int getsockopt_4(struct bpf_sockopt *ctx)
+{
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgroup_preorder.c b/tools/testing/selftests/bpf/progs/cgroup_preorder.c
new file mode 100644
index 000000000000..4ef6202baa0a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_preorder.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+unsigned int idx;
+__u8 result[4];
+
+SEC("cgroup/getsockopt")
+int child(struct bpf_sockopt *ctx)
+{
+ if (idx < 4)
+ result[idx++] = 1;
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int child_2(struct bpf_sockopt *ctx)
+{
+ if (idx < 4)
+ result[idx++] = 2;
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int parent(struct bpf_sockopt *ctx)
+{
+ if (idx < 4)
+ result[idx++] = 3;
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int parent_2(struct bpf_sockopt *ctx)
+{
+ if (idx < 4)
+ result[idx++] = 4;
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgroup_read_xattr.c b/tools/testing/selftests/bpf/progs/cgroup_read_xattr.c
new file mode 100644
index 000000000000..092db1d0435e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_read_xattr.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+char value[16];
+
+static __always_inline void read_xattr(struct cgroup *cgroup)
+{
+ struct bpf_dynptr value_ptr;
+
+ bpf_dynptr_from_mem(value, sizeof(value), 0, &value_ptr);
+ bpf_cgroup_read_xattr(cgroup, "user.bpf_test",
+ &value_ptr);
+}
+
+SEC("lsm.s/socket_connect")
+__success
+int BPF_PROG(trusted_cgroup_ptr_sleepable)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ read_xattr(cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("lsm/socket_connect")
+__success
+int BPF_PROG(trusted_cgroup_ptr_non_sleepable)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ read_xattr(cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("lsm/socket_connect")
+__success
+int BPF_PROG(use_css_iter_non_sleepable)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup_subsys_state *css;
+ struct cgroup *cgrp;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ bpf_for_each(css, css, &cgrp->self, BPF_CGROUP_ITER_ANCESTORS_UP)
+ read_xattr(css->cgroup);
+
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("lsm.s/socket_connect")
+__failure __msg("expected an RCU CS")
+int BPF_PROG(use_css_iter_sleepable_missing_rcu_lock)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup_subsys_state *css;
+ struct cgroup *cgrp;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ bpf_for_each(css, css, &cgrp->self, BPF_CGROUP_ITER_ANCESTORS_UP)
+ read_xattr(css->cgroup);
+
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("lsm.s/socket_connect")
+__success
+int BPF_PROG(use_css_iter_sleepable_with_rcu_lock)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup_subsys_state *css;
+ struct cgroup *cgrp;
+
+ bpf_rcu_read_lock();
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ goto out;
+
+ bpf_for_each(css, css, &cgrp->self, BPF_CGROUP_ITER_ANCESTORS_UP)
+ read_xattr(css->cgroup);
+
+ bpf_cgroup_release(cgrp);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("lsm/socket_connect")
+__success
+int BPF_PROG(use_bpf_cgroup_ancestor)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp, *ancestor;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ ancestor = bpf_cgroup_ancestor(cgrp, 1);
+ if (!ancestor)
+ goto out;
+
+ read_xattr(cgrp);
+ bpf_cgroup_release(ancestor);
+out:
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("cgroup/sendmsg4")
+__success
+int BPF_PROG(cgroup_skb)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp, *ancestor;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ ancestor = bpf_cgroup_ancestor(cgrp, 1);
+ if (!ancestor)
+ goto out;
+
+ read_xattr(cgrp);
+ bpf_cgroup_release(ancestor);
+out:
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c b/tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c
new file mode 100644
index 000000000000..e32b07d802bb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+__u32 data_end;
+
+SEC("cgroup_skb/ingress")
+int direct_packet_access(struct __sk_buff *skb)
+{
+ data_end = skb->data_end;
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c
index 3f757e30d7a0..ac86a8a61605 100644
--- a/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c
@@ -14,7 +14,6 @@
#include <sys/types.h>
#include <sys/socket.h>
-int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
__u16 g_serv_port = 0;
@@ -67,7 +66,6 @@ static inline int is_allowed_peer_cg(struct __sk_buff *skb,
SEC("cgroup_skb/ingress")
int ingress_lookup(struct __sk_buff *skb)
{
- __u32 serv_port_key = 0;
struct ipv6hdr ip6h;
struct tcphdr tcph;
diff --git a/tools/testing/selftests/bpf/progs/cgroup_storage.c b/tools/testing/selftests/bpf/progs/cgroup_storage.c
new file mode 100644
index 000000000000..db1e4d2d3281
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_storage.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
+ __type(key, struct bpf_cgroup_storage_key);
+ __type(value, __u64);
+} cgroup_storage SEC(".maps");
+
+SEC("cgroup_skb/egress")
+int bpf_prog(struct __sk_buff *skb)
+{
+ __u64 *counter;
+
+ counter = bpf_get_local_storage(&cgroup_storage, 0);
+ __sync_fetch_and_add(counter, 1);
+
+ /* Drop one out of every two packets */
+ return (*counter & 1);
+}
+
+char _license[] SEC("license") = "GPL";
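
For context (a sketch under stated assumptions, not part of the patch): a BPF_MAP_TYPE_CGROUP_STORAGE map is keyed from user space by struct bpf_cgroup_storage_key, so once the program above is attached to a cgroup the shared counter can be read back roughly as follows. The cgroup fd/id handling and error paths are simplified.

#include <errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_storage.skel.h"	/* generated from the program above */

/* Attach the egress program to @cgroup_fd and read back the packet counter. */
static int read_cgroup_counter(int cgroup_fd, __u64 cgroup_id, __u64 *value)
{
	struct bpf_cgroup_storage_key key = {
		.cgroup_inode_id = cgroup_id,
		.attach_type = BPF_CGROUP_INET_EGRESS,
	};
	struct cgroup_storage *skel;
	struct bpf_link *link;
	int err;

	skel = cgroup_storage__open_and_load();
	if (!skel)
		return -1;

	link = bpf_program__attach_cgroup(skel->progs.bpf_prog, cgroup_fd);
	if (!link) {
		err = -errno;
		goto out;
	}

	/* ... generate some egress traffic from within the cgroup ... */

	err = bpf_map__lookup_elem(skel->maps.cgroup_storage, &key, sizeof(key),
				   value, sizeof(*value), 0);
	bpf_link__destroy(link);
out:
	cgroup_storage__destroy(skel);
	return err;
}
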
diff --git a/tools/testing/selftests/bpf/progs/cgroup_tcp_skb.c b/tools/testing/selftests/bpf/progs/cgroup_tcp_skb.c
new file mode 100644
index 000000000000..1e2e73f3b749
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_tcp_skb.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#include <linux/bpf.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include "cgroup_tcp_skb.h"
+
+char _license[] SEC("license") = "GPL";
+
+__u16 g_sock_port = 0;
+__u32 g_sock_state = 0;
+int g_unexpected = 0;
+__u32 g_packet_count = 0;
+
+int needed_tcp_pkt(struct __sk_buff *skb, struct tcphdr *tcph)
+{
+ struct ipv6hdr ip6h;
+
+ if (skb->protocol != bpf_htons(ETH_P_IPV6))
+ return 0;
+ if (bpf_skb_load_bytes(skb, 0, &ip6h, sizeof(ip6h)))
+ return 0;
+
+ if (ip6h.nexthdr != IPPROTO_TCP)
+ return 0;
+
+ if (bpf_skb_load_bytes(skb, sizeof(ip6h), tcph, sizeof(*tcph)))
+ return 0;
+
+ if (tcph->source != bpf_htons(g_sock_port) &&
+ tcph->dest != bpf_htons(g_sock_port))
+ return 0;
+
+ return 1;
+}
+
+/* Run accept() on a socket in the cgroup to receive a new connection. */
+static int egress_accept(struct tcphdr *tcph)
+{
+ if (g_sock_state == SYN_RECV_SENDING_SYN_ACK) {
+ if (tcph->fin || !tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = SYN_RECV;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int ingress_accept(struct tcphdr *tcph)
+{
+ switch (g_sock_state) {
+ case INIT:
+ if (!tcph->syn || tcph->fin || tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = SYN_RECV_SENDING_SYN_ACK;
+ break;
+ case SYN_RECV:
+ if (tcph->fin || tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = ESTABLISHED;
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Run connect() on a socket in the cgroup to start a new connection. */
+static int egress_connect(struct tcphdr *tcph)
+{
+ if (g_sock_state == INIT) {
+ if (!tcph->syn || tcph->fin || tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = SYN_SENT;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int ingress_connect(struct tcphdr *tcph)
+{
+ if (g_sock_state == SYN_SENT) {
+ if (tcph->fin || !tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = ESTABLISHED;
+ return 1;
+ }
+
+ return 0;
+}
+
+/* The connection is closed by the peer outside the cgroup. */
+static int egress_close_remote(struct tcphdr *tcph)
+{
+ switch (g_sock_state) {
+ case ESTABLISHED:
+ break;
+ case CLOSE_WAIT_SENDING_ACK:
+ if (tcph->fin || tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = CLOSE_WAIT;
+ break;
+ case CLOSE_WAIT:
+ if (!tcph->fin)
+ g_unexpected++;
+ else
+ g_sock_state = LAST_ACK;
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static int ingress_close_remote(struct tcphdr *tcph)
+{
+ switch (g_sock_state) {
+ case ESTABLISHED:
+ if (tcph->fin)
+ g_sock_state = CLOSE_WAIT_SENDING_ACK;
+ break;
+ case LAST_ACK:
+ if (tcph->fin || tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = CLOSED;
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+/* The connection is closed by the endpoint inside the cgroup. */
+static int egress_close_local(struct tcphdr *tcph)
+{
+ switch (g_sock_state) {
+ case ESTABLISHED:
+ if (tcph->fin)
+ g_sock_state = FIN_WAIT1;
+ break;
+ case TIME_WAIT_SENDING_ACK:
+ if (tcph->fin || tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = TIME_WAIT;
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static int ingress_close_local(struct tcphdr *tcph)
+{
+ switch (g_sock_state) {
+ case ESTABLISHED:
+ break;
+ case FIN_WAIT1:
+ if (tcph->fin || tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = FIN_WAIT2;
+ break;
+ case FIN_WAIT2:
+ if (!tcph->fin || tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = TIME_WAIT_SENDING_ACK;
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Check the types of outgoing packets of a server socket to make sure they
+ * are consistent with the state of the server socket.
+ *
+ * The connection is closed by the client side.
+ */
+SEC("cgroup_skb/egress")
+int server_egress(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+ /* Egress of the server socket. */
+ if (egress_accept(&tcph) || egress_close_remote(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
+
+/* Check the types of incoming packets of a server socket to make sure they
+ * are consistent with the state of the server socket.
+ *
+ * The connection is closed by the client side.
+ */
+SEC("cgroup_skb/ingress")
+int server_ingress(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+ /* Ingress of the server socket. */
+ if (ingress_accept(&tcph) || ingress_close_remote(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
+
+/* Check the types of outgoing packets of a server socket to make sure they
+ * are consistent with the state of the server socket.
+ *
+ * The connection is closed by the server side.
+ */
+SEC("cgroup_skb/egress")
+int server_egress_srv(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+ /* Egress of the server socket. */
+ if (egress_accept(&tcph) || egress_close_local(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
+
+/* Check the types of incoming packets of a server socket to make sure they
+ * are consistent with the state of the server socket.
+ *
+ * The connection is closed by the server side.
+ */
+SEC("cgroup_skb/ingress")
+int server_ingress_srv(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+ /* Ingress of the server socket. */
+ if (ingress_accept(&tcph) || ingress_close_local(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
+
+/* Check the types of outgoing packets of a client socket to make sure they
+ * are consistent with the state of the client socket.
+ *
+ * The connection is closed by the server side.
+ */
+SEC("cgroup_skb/egress")
+int client_egress_srv(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+ /* Egress of the client socket. */
+ if (egress_connect(&tcph) || egress_close_remote(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
+
+/* Check the types of incoming packets of a client socket to make sure they
+ * are consistent with the state of the client socket.
+ *
+ * The connection is closed by the server side.
+ */
+SEC("cgroup_skb/ingress")
+int client_ingress_srv(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+ /* Ingress of the client socket. */
+ if (ingress_connect(&tcph) || ingress_close_remote(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
+
+/* Check the types of outgoing packets of a client socket to make sure they
+ * are consistent with the state of the client socket.
+ *
+ * The connection is closed by the client side.
+ */
+SEC("cgroup_skb/egress")
+int client_egress(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+ /* Egress of the client socket. */
+ if (egress_connect(&tcph) || egress_close_local(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
+
+/* Check the types of incoming packets of a client socket to make sure they
+ * are consistent with the state of the client socket.
+ *
+ * The connection is closed by the client side.
+ */
+SEC("cgroup_skb/ingress")
+int client_ingress(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+ /* Ingress of the client socket. */
+ if (ingress_connect(&tcph) || ingress_close_local(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
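
A rough sketch of the user-space side (assumptions: the generated skeleton is named cgroup_tcp_skb and the actual socket exchange is hidden behind a placeholder): pick a port, attach the matching egress/ingress pair to the test cgroup, run one TCP connection, then check that the state machine saw only expected packet types.

#include <bpf/libbpf.h>
#include "cgroup_tcp_skb.skel.h"	/* assumed skeleton name */

/* Exercise the client-closes-connection state machine on @cgroup_fd.
 * run_tcp_connection() stands in for the real connect/send/close sequence.
 */
static int check_client_close(int cgroup_fd, __u16 port)
{
	struct cgroup_tcp_skb *skel;
	struct bpf_link *egress, *ingress;
	int err = -1;

	skel = cgroup_tcp_skb__open_and_load();
	if (!skel)
		return -1;

	skel->bss->g_sock_port = port;

	egress = bpf_program__attach_cgroup(skel->progs.client_egress, cgroup_fd);
	ingress = bpf_program__attach_cgroup(skel->progs.client_ingress, cgroup_fd);
	if (!egress || !ingress)
		goto out;

	/* run_tcp_connection(port); -- connect, exchange data, close() on the client */

	err = (skel->bss->g_unexpected == 0 && skel->bss->g_packet_count > 0) ? 0 : -1;
out:
	bpf_link__destroy(egress);
	bpf_link__destroy(ingress);
	cgroup_tcp_skb__destroy(skel);
	return err;
}
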
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h
new file mode 100644
index 000000000000..73ba32e9a693
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _CGRP_KFUNC_COMMON_H
+#define _CGRP_KFUNC_COMMON_H
+
+#include <errno.h>
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct __cgrps_kfunc_map_value {
+ struct cgroup __kptr * cgrp;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct __cgrps_kfunc_map_value);
+ __uint(max_entries, 1);
+} __cgrps_kfunc_map SEC(".maps");
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+static inline struct __cgrps_kfunc_map_value *cgrps_kfunc_map_value_lookup(struct cgroup *cgrp)
+{
+ s32 id;
+ long status;
+
+ status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
+ if (status)
+ return NULL;
+
+ return bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
+}
+
+static inline int cgrps_kfunc_map_insert(struct cgroup *cgrp)
+{
+ struct __cgrps_kfunc_map_value local, *v;
+ long status;
+ struct cgroup *acquired, *old;
+ s32 id;
+
+ status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
+ if (status)
+ return status;
+
+ local.cgrp = NULL;
+ status = bpf_map_update_elem(&__cgrps_kfunc_map, &id, &local, BPF_NOEXIST);
+ if (status)
+ return status;
+
+ v = bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
+ if (!v) {
+ bpf_map_delete_elem(&__cgrps_kfunc_map, &id);
+ return -ENOENT;
+ }
+
+ acquired = bpf_cgroup_acquire(cgrp);
+ if (!acquired) {
+ bpf_map_delete_elem(&__cgrps_kfunc_map, &id);
+ return -ENOENT;
+ }
+
+ old = bpf_kptr_xchg(&v->cgrp, acquired);
+ if (old) {
+ bpf_cgroup_release(old);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+#endif /* _CGRP_KFUNC_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
new file mode 100644
index 000000000000..9fe9c4a4e8f6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+#include "cgrp_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(cgroup_mkdir,
+ * TP_PROTO(struct cgroup *cgrp, const char *path),
+ * TP_ARGS(cgrp, path)
+ */
+
+static struct __cgrps_kfunc_map_value *insert_lookup_cgrp(struct cgroup *cgrp)
+{
+ int status;
+
+ status = cgrps_kfunc_map_insert(cgrp);
+ if (status)
+ return NULL;
+
+ return cgrps_kfunc_map_value_lookup(cgrp);
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+ struct __cgrps_kfunc_map_value *v;
+
+ v = insert_lookup_cgrp(cgrp);
+ if (!v)
+ return 0;
+
+ /* Can't invoke bpf_cgroup_acquire() on an untrusted pointer. */
+ acquired = bpf_cgroup_acquire(v->cgrp);
+ if (acquired)
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(cgrp_kfunc_acquire_no_null_check, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+
+ acquired = bpf_cgroup_acquire(cgrp);
+ /*
+ * Can't invoke bpf_cgroup_release() without checking the return value
+ * of bpf_cgroup_acquire().
+ */
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("arg#0 pointer type STRUCT cgroup must point")
+int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired, *stack_cgrp = (struct cgroup *)&path;
+
+ /* Can't invoke bpf_cgroup_acquire() on a random frame pointer. */
+ acquired = bpf_cgroup_acquire((struct cgroup *)&stack_cgrp);
+ if (acquired)
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("kretprobe/cgroup_destroy_locked")
+__failure __msg("calling kernel function bpf_cgroup_acquire is not allowed")
+int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp)
+{
+ struct cgroup *acquired;
+
+ /* Can't acquire an untrusted struct cgroup * pointer. */
+ acquired = bpf_cgroup_acquire(cgrp);
+ if (acquired)
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("cgrp_kfunc_acquire_trusted_walked")
+int BPF_PROG(cgrp_kfunc_acquire_trusted_walked, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+
+ /* Can't invoke bpf_cgroup_acquire() on a pointer obtained from walking a trusted cgroup. */
+ acquired = bpf_cgroup_acquire(cgrp->old_dom_cgrp);
+ if (acquired)
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(cgrp_kfunc_acquire_null, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+
+ /* Can't invoke bpf_cgroup_acquire() on a NULL pointer. */
+ acquired = bpf_cgroup_acquire(NULL);
+ if (acquired)
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Unreleased reference")
+int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+
+ acquired = bpf_cgroup_acquire(cgrp);
+
+ /* Acquired cgroup is never released. */
+ __sink(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Unreleased reference")
+int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *kptr;
+ struct __cgrps_kfunc_map_value *v;
+
+ v = insert_lookup_cgrp(cgrp);
+ if (!v)
+ return 0;
+
+ kptr = bpf_kptr_xchg(&v->cgrp, NULL);
+ if (!kptr)
+ return 0;
+
+ /* Kptr retrieved from map is never released. */
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("must be referenced or trusted")
+int BPF_PROG(cgrp_kfunc_rcu_get_release, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *kptr;
+ struct __cgrps_kfunc_map_value *v;
+
+ v = insert_lookup_cgrp(cgrp);
+ if (!v)
+ return 0;
+
+ bpf_rcu_read_lock();
+ kptr = v->cgrp;
+ if (kptr)
+ /* Can't release a cgroup kptr stored in a map. */
+ bpf_cgroup_release(kptr);
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path)
+{
+ struct __cgrps_kfunc_map_value *v;
+
+ v = insert_lookup_cgrp(cgrp);
+ if (!v)
+ return 0;
+
+ /* Can't invoke bpf_cgroup_release() on an untrusted pointer. */
+ bpf_cgroup_release(v->cgrp);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("arg#0 pointer type STRUCT cgroup must point")
+int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired = (struct cgroup *)&path;
+
+ /* Cannot release random frame pointer. */
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path)
+{
+ struct __cgrps_kfunc_map_value local, *v;
+ long status;
+ struct cgroup *acquired, *old;
+ s32 id;
+
+ status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
+ if (status)
+ return 0;
+
+ local.cgrp = NULL;
+ status = bpf_map_update_elem(&__cgrps_kfunc_map, &id, &local, BPF_NOEXIST);
+ if (status)
+ return status;
+
+ v = bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
+ if (!v)
+ return -ENOENT;
+
+ acquired = bpf_cgroup_acquire(cgrp);
+ if (!acquired)
+ return -ENOENT;
+
+ old = bpf_kptr_xchg(&v->cgrp, acquired);
+
+ /* old cannot be passed to bpf_cgroup_release() without a NULL check. */
+ bpf_cgroup_release(old);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("release kernel function bpf_cgroup_release expects")
+int BPF_PROG(cgrp_kfunc_release_unacquired, struct cgroup *cgrp, const char *path)
+{
+ /* Cannot release trusted cgroup pointer which was not acquired. */
+ bpf_cgroup_release(cgrp);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c
new file mode 100644
index 000000000000..5354455a01be
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "cgrp_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+int err, pid, invocations;
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(cgroup_mkdir,
+ * TP_PROTO(struct cgroup *cgrp, const char *path),
+ * TP_ARGS(cgrp, path)
+ */
+
+static bool is_test_kfunc_task(void)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+ bool same = pid == cur_pid;
+
+ if (same)
+ __sync_fetch_and_add(&invocations, 1);
+
+ return same;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_acquire_release_argument, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ acquired = bpf_cgroup_acquire(cgrp);
+ if (!acquired)
+ err = 1;
+ else
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_acquire_leave_in_map, struct cgroup *cgrp, const char *path)
+{
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = cgrps_kfunc_map_insert(cgrp);
+ if (status)
+ err = 1;
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_xchg_release, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *kptr, *cg;
+ struct __cgrps_kfunc_map_value *v;
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = cgrps_kfunc_map_insert(cgrp);
+ if (status) {
+ err = 1;
+ return 0;
+ }
+
+ v = cgrps_kfunc_map_value_lookup(cgrp);
+ if (!v) {
+ err = 2;
+ return 0;
+ }
+
+ kptr = v->cgrp;
+ if (!kptr) {
+ err = 4;
+ return 0;
+ }
+
+ cg = bpf_cgroup_ancestor(kptr, 1);
+ if (cg) /* verifier only check */
+ bpf_cgroup_release(cg);
+
+ kptr = bpf_kptr_xchg(&v->cgrp, NULL);
+ if (!kptr) {
+ err = 3;
+ return 0;
+ }
+
+ bpf_cgroup_release(kptr);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_get_release, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *kptr;
+ struct __cgrps_kfunc_map_value *v;
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = cgrps_kfunc_map_insert(cgrp);
+ if (status) {
+ err = 1;
+ return 0;
+ }
+
+ v = cgrps_kfunc_map_value_lookup(cgrp);
+ if (!v) {
+ err = 2;
+ return 0;
+ }
+
+ bpf_rcu_read_lock();
+ kptr = v->cgrp;
+ if (!kptr)
+ err = 3;
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_get_ancestors, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *self, *ancestor1, *invalid;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ self = bpf_cgroup_ancestor(cgrp, cgrp->level);
+ if (!self) {
+ err = 1;
+ return 0;
+ }
+
+ if (self->self.id != cgrp->self.id) {
+ bpf_cgroup_release(self);
+ err = 2;
+ return 0;
+ }
+ bpf_cgroup_release(self);
+
+ ancestor1 = bpf_cgroup_ancestor(cgrp, cgrp->level - 1);
+ if (!ancestor1) {
+ err = 3;
+ return 0;
+ }
+ bpf_cgroup_release(ancestor1);
+
+ invalid = bpf_cgroup_ancestor(cgrp, 10000);
+ if (invalid) {
+ bpf_cgroup_release(invalid);
+ err = 4;
+ return 0;
+ }
+
+ invalid = bpf_cgroup_ancestor(cgrp, -1);
+ if (invalid) {
+ bpf_cgroup_release(invalid);
+ err = 5;
+ return 0;
+ }
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_from_id, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *parent, *res;
+ u64 parent_cgid;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ /* @cgrp's ID is not visible yet, let's test with the parent */
+ parent = bpf_cgroup_ancestor(cgrp, cgrp->level - 1);
+ if (!parent) {
+ err = 1;
+ return 0;
+ }
+
+ parent_cgid = parent->kn->id;
+ bpf_cgroup_release(parent);
+
+ res = bpf_cgroup_from_id(parent_cgid);
+ if (!res) {
+ err = 2;
+ return 0;
+ }
+
+ bpf_cgroup_release(res);
+
+ if (res != parent) {
+ err = 3;
+ return 0;
+ }
+
+ res = bpf_cgroup_from_id((u64)-1);
+ if (res) {
+ bpf_cgroup_release(res);
+ err = 4;
+ return 0;
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c b/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c
new file mode 100644
index 000000000000..8aeba1b75c83
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_tracing_net.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct socket_cookie {
+ __u64 cookie_key;
+ __u64 cookie_value;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct socket_cookie);
+} socket_cookies SEC(".maps");
+
+SEC("cgroup/connect6")
+int set_cookie(struct bpf_sock_addr *ctx)
+{
+ struct socket_cookie *p;
+ struct tcp_sock *tcp_sk;
+ struct bpf_sock *sk;
+
+ if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6)
+ return 1;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 1;
+
+ tcp_sk = bpf_skc_to_tcp_sock(sk);
+ if (!tcp_sk)
+ return 1;
+
+ p = bpf_cgrp_storage_get(&socket_cookies,
+ tcp_sk->inet_conn.icsk_inet.sk.sk_cgrp_data.cgroup, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!p)
+ return 1;
+
+ p->cookie_value = 0xF;
+ p->cookie_key = bpf_get_socket_cookie(ctx);
+ return 1;
+}
+
+SEC("sockops")
+int update_cookie_sockops(struct bpf_sock_ops *ctx)
+{
+ struct socket_cookie *p;
+ struct tcp_sock *tcp_sk;
+ struct bpf_sock *sk;
+
+ if (ctx->family != AF_INET6 || ctx->op != BPF_SOCK_OPS_TCP_CONNECT_CB)
+ return 1;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 1;
+
+ tcp_sk = bpf_skc_to_tcp_sock(sk);
+ if (!tcp_sk)
+ return 1;
+
+ p = bpf_cgrp_storage_get(&socket_cookies,
+ tcp_sk->inet_conn.icsk_inet.sk.sk_cgrp_data.cgroup, 0, 0);
+ if (!p)
+ return 1;
+
+ if (p->cookie_key != bpf_get_socket_cookie(ctx))
+ return 1;
+
+ p->cookie_value |= (ctx->local_port << 8);
+ return 1;
+}
+
+SEC("fexit/inet_stream_connect")
+int BPF_PROG(update_cookie_tracing, struct socket *sock,
+ struct sockaddr *uaddr, int addr_len, int flags)
+{
+ struct socket_cookie *p;
+
+ if (uaddr->sa_family != AF_INET6)
+ return 0;
+
+ p = bpf_cgrp_storage_get(&socket_cookies, sock->sk->sk_cgrp_data.cgroup, 0, 0);
+ if (!p)
+ return 0;
+
+ if (p->cookie_key != bpf_get_socket_cookie(sock->sk))
+ return 0;
+
+ p->cookie_value |= 0xF0;
+ return 0;
+}
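
A small, non-authoritative sketch of how user space would read the result back: BPF_MAP_TYPE_CGRP_STORAGE maps are looked up from user space with a cgroup file descriptor as the key, so after the connect6/sockops/fexit programs above have populated the entry, the cookie pair can be fetched as below (skeleton name assumed to follow the source file name).

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <bpf/libbpf.h>
#include "cgrp_ls_attach_cgroup.skel.h"	/* assumed skeleton name */

struct socket_cookie {
	__u64 cookie_key;
	__u64 cookie_value;
};

/* Read the per-cgroup socket cookie written by the programs above. */
static int read_socket_cookie(struct cgrp_ls_attach_cgroup *skel,
			      const char *cgroup_path, struct socket_cookie *val)
{
	int cg_fd, err;

	cg_fd = open(cgroup_path, O_RDONLY);
	if (cg_fd < 0)
		return -errno;

	err = bpf_map__lookup_elem(skel->maps.socket_cookies,
				   &cg_fd, sizeof(cg_fd), val, sizeof(*val), 0);
	close(cg_fd);
	return err;
}
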
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_negative.c b/tools/testing/selftests/bpf/progs/cgrp_ls_negative.c
new file mode 100644
index 000000000000..d41f90e2ab64
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_negative.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+
+ task = bpf_get_current_task_btf();
+ (void)bpf_cgrp_storage_get(&map_a, (struct cgroup *)task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
new file mode 100644
index 000000000000..3500e4b69ebe
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_b SEC(".maps");
+
+int target_hid = 0;
+bool is_cgroup1 = 0;
+
+struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+
+static void __on_update(struct cgroup *cgrp)
+{
+ long *ptr;
+
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr += 1;
+
+ ptr = bpf_cgrp_storage_get(&map_b, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr += 1;
+}
+
+SEC("fentry/bpf_local_storage_update")
+int BPF_PROG(on_update)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct cgroup *cgrp;
+
+ if (is_cgroup1) {
+ cgrp = bpf_task_get_cgroup1(task, target_hid);
+ if (!cgrp)
+ return 0;
+
+ __on_update(cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+ }
+
+ __on_update(task->cgroups->dfl_cgrp);
+ return 0;
+}
+
+static void __on_enter(struct pt_regs *regs, long id, struct cgroup *cgrp)
+{
+ long *ptr;
+
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr = 200;
+
+ ptr = bpf_cgrp_storage_get(&map_b, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr = 100;
+}
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct cgroup *cgrp;
+
+ if (is_cgroup1) {
+ cgrp = bpf_task_get_cgroup1(task, target_hid);
+ if (!cgrp)
+ return 0;
+
+ __on_enter(regs, id, cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+ }
+
+ __on_enter(regs, id, task->cgroups->dfl_cgrp);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
new file mode 100644
index 000000000000..a2de95f85648
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+__s32 target_pid;
+__u64 cgroup_id;
+int target_hid;
+bool is_cgroup1;
+
+struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+SEC("?iter.s/cgroup")
+int cgroup_iter(struct bpf_iter__cgroup *ctx)
+{
+ struct cgroup *cgrp = ctx->cgroup;
+ long *ptr;
+
+ if (cgrp == NULL)
+ return 0;
+
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ cgroup_id = cgrp->kn->id;
+ return 0;
+}
+
+static void __no_rcu_lock(struct cgroup *cgrp)
+{
+ long *ptr;
+
+ /* Tracing RCU (rcu_read_lock_trace) is held even in sleepable progs, so
+ * bpf_cgrp_storage_get() can be used here without an explicit RCU lock.
+ */
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ cgroup_id = cgrp->kn->id;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int cgrp1_no_rcu_lock(void *ctx)
+{
+ struct task_struct *task;
+ struct cgroup *cgrp;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ /* bpf_task_get_cgroup1 can work in sleepable prog */
+ cgrp = bpf_task_get_cgroup1(task, target_hid);
+ if (!cgrp)
+ return 0;
+
+ __no_rcu_lock(cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int no_rcu_lock(void *ctx)
+{
+ struct task_struct *task;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ /* task->cgroups is untrusted in sleepable prog outside of RCU CS */
+ __no_rcu_lock(task->cgroups->dfl_cgrp);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int yes_rcu_lock(void *ctx)
+{
+ struct task_struct *task;
+ struct cgroup *cgrp;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ if (is_cgroup1) {
+ bpf_rcu_read_lock();
+ cgrp = bpf_task_get_cgroup1(task, target_hid);
+ if (!cgrp) {
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ cgroup_id = cgrp->kn->id;
+ bpf_cgroup_release(cgrp);
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ bpf_rcu_read_lock();
+ cgrp = task->cgroups->dfl_cgrp;
+ /* cgrp is trusted under RCU CS */
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ cgroup_id = cgrp->kn->id;
+ bpf_rcu_read_unlock();
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c b/tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
new file mode 100644
index 000000000000..1c348f000f38
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_b SEC(".maps");
+
+#define MAGIC_VALUE 0xabcd1234
+
+pid_t target_pid = 0;
+int mismatch_cnt = 0;
+int enter_cnt = 0;
+int exit_cnt = 0;
+int target_hid = 0;
+bool is_cgroup1 = 0;
+
+struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+
+static void __on_enter(struct pt_regs *regs, long id, struct cgroup *cgrp)
+{
+ long *ptr;
+ int err;
+
+ /* populate value 0 */
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return;
+
+ /* delete value 0 */
+ err = bpf_cgrp_storage_delete(&map_a, cgrp);
+ if (err)
+ return;
+
+ /* value is not available */
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, 0);
+ if (ptr)
+ return;
+
+ /* re-populate the value */
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return;
+ __sync_fetch_and_add(&enter_cnt, 1);
+ *ptr = MAGIC_VALUE + enter_cnt;
+}
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ struct cgroup *cgrp;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ if (is_cgroup1) {
+ cgrp = bpf_task_get_cgroup1(task, target_hid);
+ if (!cgrp)
+ return 0;
+
+ __on_enter(regs, id, cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+ }
+
+ __on_enter(regs, id, task->cgroups->dfl_cgrp);
+ return 0;
+}
+
+static void __on_exit(struct pt_regs *regs, long id, struct cgroup *cgrp)
+{
+ long *ptr;
+
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return;
+
+ __sync_fetch_and_add(&exit_cnt, 1);
+ if (*ptr != MAGIC_VALUE + exit_cnt)
+ __sync_fetch_and_add(&mismatch_cnt, 1);
+}
+
+SEC("tp_btf/sys_exit")
+int BPF_PROG(on_exit, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ struct cgroup *cgrp;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ if (is_cgroup1) {
+ cgrp = bpf_task_get_cgroup1(task, target_hid);
+ if (!cgrp)
+ return 0;
+
+ __on_exit(regs, id, cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+ }
+
+ __on_exit(regs, id, task->cgroups->dfl_cgrp);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/compute_live_registers.c b/tools/testing/selftests/bpf/progs/compute_live_registers.c
new file mode 100644
index 000000000000..6884ab99a421
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/compute_live_registers.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_arena_common.h"
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} test_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 1);
+} arena SEC(".maps");
+
+SEC("socket")
+__log_level(2)
+__msg(" 0: .......... (b7) r0 = 42")
+__msg(" 1: 0......... (bf) r1 = r0")
+__msg(" 2: .1........ (bf) r2 = r1")
+__msg(" 3: ..2....... (bf) r3 = r2")
+__msg(" 4: ...3...... (bf) r4 = r3")
+__msg(" 5: ....4..... (bf) r5 = r4")
+__msg(" 6: .....5.... (bf) r6 = r5")
+__msg(" 7: ......6... (bf) r7 = r6")
+__msg(" 8: .......7.. (bf) r8 = r7")
+__msg(" 9: ........8. (bf) r9 = r8")
+__msg("10: .........9 (bf) r0 = r9")
+__msg("11: 0......... (95) exit")
+__naked void assign_chain(void)
+{
+ asm volatile (
+ "r0 = 42;"
+ "r1 = r0;"
+ "r2 = r1;"
+ "r3 = r2;"
+ "r4 = r3;"
+ "r5 = r4;"
+ "r6 = r5;"
+ "r7 = r6;"
+ "r8 = r7;"
+ "r9 = r8;"
+ "r0 = r9;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("0: .......... (b7) r1 = 7")
+__msg("1: .1........ (07) r1 += 7")
+__msg("2: .......... (b7) r2 = 7")
+__msg("3: ..2....... (b7) r3 = 42")
+__msg("4: ..23...... (0f) r2 += r3")
+__msg("5: .......... (b7) r0 = 0")
+__msg("6: 0......... (95) exit")
+__naked void arithmetics(void)
+{
+ asm volatile (
+ "r1 = 7;"
+ "r1 += 7;"
+ "r2 = 7;"
+ "r3 = 42;"
+ "r2 += r3;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+#ifdef CAN_USE_BPF_ST
+SEC("socket")
+__log_level(2)
+__msg(" 1: .1........ (07) r1 += -8")
+__msg(" 2: .1........ (7a) *(u64 *)(r1 +0) = 7")
+__msg(" 3: .1........ (b7) r2 = 42")
+__msg(" 4: .12....... (7b) *(u64 *)(r1 +0) = r2")
+__msg(" 5: .12....... (7b) *(u64 *)(r1 +0) = r2")
+__msg(" 6: .......... (b7) r0 = 0")
+__naked void store(void)
+{
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -8;"
+ "*(u64 *)(r1 +0) = 7;"
+ "r2 = 42;"
+ "*(u64 *)(r1 +0) = r2;"
+ "*(u64 *)(r1 +0) = r2;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+#endif
+
+SEC("socket")
+__log_level(2)
+__msg("1: ....4..... (07) r4 += -8")
+__msg("2: ....4..... (79) r5 = *(u64 *)(r4 +0)")
+__msg("3: ....45.... (07) r4 += -8")
+__naked void load(void)
+{
+ asm volatile (
+ "r4 = r10;"
+ "r4 += -8;"
+ "r5 = *(u64 *)(r4 +0);"
+ "r4 += -8;"
+ "r0 = r5;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("0: .1........ (61) r2 = *(u32 *)(r1 +0)")
+__msg("1: ..2....... (d4) r2 = le64 r2")
+__msg("2: ..2....... (bf) r0 = r2")
+__naked void endian(void)
+{
+ asm volatile (
+ "r2 = *(u32 *)(r1 +0);"
+ "r2 = le64 r2;"
+ "r0 = r2;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg(" 8: 0......... (b7) r1 = 1")
+__msg(" 9: 01........ (db) r1 = atomic64_fetch_add((u64 *)(r0 +0), r1)")
+__msg("10: 01........ (c3) lock *(u32 *)(r0 +0) += r1")
+__msg("11: 01........ (db) r1 = atomic64_xchg((u64 *)(r0 +0), r1)")
+__msg("12: 01........ (bf) r2 = r0")
+__msg("13: .12....... (bf) r0 = r1")
+__msg("14: 012....... (db) r0 = atomic64_cmpxchg((u64 *)(r2 +0), r0, r1)")
+__naked void atomic(void)
+{
+ asm volatile (
+ "r2 = r10;"
+ "r2 += -8;"
+ "r1 = 0;"
+ "*(u64 *)(r2 +0) = r1;"
+ "r1 = %[test_map] ll;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 == 0 goto 1f;"
+ "r1 = 1;"
+ "r1 = atomic_fetch_add((u64 *)(r0 +0), r1);"
+ ".8byte %[add_nofetch];" /* same as "lock *(u32 *)(r0 +0) += r1;" */
+ "r1 = xchg_64(r0 + 0, r1);"
+ "r2 = r0;"
+ "r0 = r1;"
+ "r0 = cmpxchg_64(r2 + 0, r0, r1);"
+ "1: exit;"
+ :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(test_map),
+ __imm_insn(add_nofetch, BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 0))
+ : __clobber_all);
+}
+
+#ifdef CAN_USE_LOAD_ACQ_STORE_REL
+
+SEC("socket")
+__log_level(2)
+__msg("2: .12....... (db) store_release((u64 *)(r2 -8), r1)")
+__msg("3: .......... (bf) r3 = r10")
+__msg("4: ...3...... (db) r4 = load_acquire((u64 *)(r3 -8))")
+__naked void atomic_load_acq_store_rel(void)
+{
+ asm volatile (
+ "r1 = 42;"
+ "r2 = r10;"
+ ".8byte %[store_release_insn];" /* store_release((u64 *)(r2 - 8), r1); */
+ "r3 = r10;"
+ ".8byte %[load_acquire_insn];" /* r4 = load_acquire((u64 *)(r3 + 0)); */
+ "r0 = r4;"
+ "exit;"
+ :
+ : __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_2, BPF_REG_1, -8)),
+ __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_4, BPF_REG_3, -8))
+ : __clobber_all);
+}
+
+#endif /* CAN_USE_LOAD_ACQ_STORE_REL */
+
+SEC("socket")
+__log_level(2)
+__msg("4: .12....7.. (85) call bpf_trace_printk#6")
+__msg("5: 0......7.. (0f) r0 += r7")
+__naked void regular_call(void)
+{
+ asm volatile (
+ "r7 = 1;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 1;"
+ "call %[bpf_trace_printk];"
+ "r0 += r7;"
+ "exit;"
+ :
+ : __imm(bpf_trace_printk)
+ : __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("2: 012....... (25) if r1 > 0x7 goto pc+1")
+__msg("3: ..2....... (bf) r0 = r2")
+__naked void if1(void)
+{
+ asm volatile (
+ "r0 = 1;"
+ "r2 = 2;"
+ "if r1 > 0x7 goto +1;"
+ "r0 = r2;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("3: 0123...... (2d) if r1 > r3 goto pc+1")
+__msg("4: ..2....... (bf) r0 = r2")
+__naked void if2(void)
+{
+ asm volatile (
+ "r0 = 1;"
+ "r2 = 2;"
+ "r3 = 7;"
+ "if r1 > r3 goto +1;"
+ "r0 = r2;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Verifier misses that r2 is alive if jset is not handled properly */
+SEC("socket")
+__log_level(2)
+__msg("2: 012....... (45) if r1 & 0x7 goto pc+1")
+__naked void if3_jset_bug(void)
+{
+ asm volatile (
+ "r0 = 1;"
+ "r2 = 2;"
+ "if r1 & 0x7 goto +1;"
+ "exit;"
+ "r0 = r2;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("0: .......... (b7) r1 = 0")
+__msg("1: .1........ (b7) r2 = 7")
+__msg("2: .12....... (25) if r1 > 0x7 goto pc+4")
+__msg("3: .12....... (07) r1 += 1")
+__msg("4: .12....... (27) r2 *= 2")
+__msg("5: .12....... (05) goto pc+0")
+__msg("6: .12....... (05) goto pc-5")
+__msg("7: .......... (b7) r0 = 0")
+__msg("8: 0......... (95) exit")
+__naked void loop(void)
+{
+ asm volatile (
+ "r1 = 0;"
+ "r2 = 7;"
+ "if r1 > 0x7 goto +4;"
+ "r1 += 1;"
+ "r2 *= 2;"
+ "goto +0;"
+ "goto -5;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_trace_printk)
+ : __clobber_all);
+}
+
+#ifdef CAN_USE_GOTOL
+SEC("socket")
+__log_level(2)
+__msg("2: .123...... (25) if r1 > 0x7 goto pc+2")
+__msg("3: ..2....... (bf) r0 = r2")
+__msg("4: 0......... (06) gotol pc+1")
+__msg("5: ...3...... (bf) r0 = r3")
+__msg("6: 0......... (95) exit")
+__naked void gotol(void)
+{
+ asm volatile (
+ "r2 = 42;"
+ "r3 = 24;"
+ "if r1 > 0x7 goto +2;"
+ "r0 = r2;"
+ "gotol +1;"
+ "r0 = r3;"
+ "exit;"
+ :
+ : __imm(bpf_trace_printk)
+ : __clobber_all);
+}
+#endif
+
+SEC("socket")
+__log_level(2)
+__msg("0: .......... (b7) r1 = 1")
+__msg("1: .1........ (e5) may_goto pc+1")
+__msg("2: .......... (05) goto pc-3")
+__msg("3: .1........ (bf) r0 = r1")
+__msg("4: 0......... (95) exit")
+__naked void may_goto(void)
+{
+ asm volatile (
+ "1: r1 = 1;"
+ ".8byte %[may_goto];"
+ "goto 1b;"
+ "r0 = r1;"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id),
+ __imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, +1 /* offset */, 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("1: 0......... (18) r2 = 0x7")
+__msg("3: 0.2....... (0f) r0 += r2")
+__naked void ldimm64(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "r2 = 0x7 ll;"
+ "r0 += r2;"
+ "exit;"
+ :
+ :: __clobber_all);
+}
+
+/* No rules specific to LD_ABS/LD_IND; the default behaviour kicks in */
+SEC("socket")
+__log_level(2)
+__msg("2: 0123456789 (30) r0 = *(u8 *)skb[42]")
+__msg("3: 012.456789 (0f) r7 += r0")
+__msg("4: 012.456789 (b7) r3 = 42")
+__msg("5: 0123456789 (50) r0 = *(u8 *)skb[r3 + 0]")
+__msg("6: 0......7.. (0f) r7 += r0")
+__naked void ldabs(void)
+{
+ asm volatile (
+ "r6 = r1;"
+ "r7 = 0;"
+ "r0 = *(u8 *)skb[42];"
+ "r7 += r0;"
+ "r3 = 42;"
+ ".8byte %[ld_ind];" /* same as "r0 = *(u8 *)skb[r3];" */
+ "r7 += r0;"
+ "r0 = r7;"
+ "exit;"
+ :
+ : __imm_insn(ld_ind, BPF_LD_IND(BPF_B, BPF_REG_3, 0))
+ : __clobber_all);
+}
+
+
+#ifdef __BPF_FEATURE_ADDR_SPACE_CAST
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__log_level(2)
+__msg(" 6: .12345.... (85) call bpf_arena_alloc_pages")
+__msg(" 7: 0......... (bf) r1 = addr_space_cast(r0, 0, 1)")
+__msg(" 8: .1........ (b7) r2 = 42")
+__naked void addr_space_cast(void)
+{
+ asm volatile (
+ "r1 = %[arena] ll;"
+ "r2 = 0;"
+ "r3 = 1;"
+ "r4 = 0;"
+ "r5 = 0;"
+ "call %[bpf_arena_alloc_pages];"
+ "r1 = addr_space_cast(r0, 0, 1);"
+ "r2 = 42;"
+ "*(u64 *)(r1 +0) = r2;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_arena_alloc_pages),
+ __imm_addr(arena)
+ : __clobber_all);
+}
+#endif
+
+static __used __naked int aux1(void)
+{
+ asm volatile (
+ "r0 = r1;"
+ "r0 += r2;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("0: ....45.... (b7) r1 = 1")
+__msg("1: .1..45.... (b7) r2 = 2")
+__msg("2: .12.45.... (b7) r3 = 3")
+/* Conservative liveness for subprog parameters. */
+__msg("3: .12345.... (85) call pc+2")
+__msg("4: .......... (b7) r0 = 0")
+__msg("5: 0......... (95) exit")
+__msg("6: .12....... (bf) r0 = r1")
+__msg("7: 0.2....... (0f) r0 += r2")
+/* Conservative liveness for subprog return value. */
+__msg("8: 0......... (95) exit")
+__naked void subprog1(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "r2 = 2;"
+ "r3 = 3;"
+ "call aux1;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* to retain debug info for BTF generation */
+void kfunc_root(void)
+{
+ bpf_arena_alloc_pages(0, 0, 0, 0, 0);
+}
+
+char _license[] SEC("license") = "GPL";
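
Reading the __msg() expectations above: each verifier log line is prefixed with ten columns, one per register r0..r9; column i shows the digit i when ri is live before that instruction (it will still be read before being overwritten on some path), and '.' when it is not. A hand-worked illustration of the notation (hypothetical program, not one of the expected transcripts above):

/* Hypothetical three-instruction program annotated with the liveness
 * columns it would receive under the convention used in this file.
 */
SEC("socket")
__naked void liveness_notation_example(void)
{
	asm volatile (
	"r2 = 1;"	/* 0: ..........  nothing is read before being written yet   */
	"r0 = r2;"	/* 1: ..2.......  r2 is live: this insn reads it              */
	"exit;"		/* 2: 0.........  r0 is live: exit consumes the return value */
	::: __clobber_all);
}
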
diff --git a/tools/testing/selftests/bpf/progs/connect4_dropper.c b/tools/testing/selftests/bpf/progs/connect4_dropper.c
new file mode 100644
index 000000000000..a3819a5d09c8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/connect4_dropper.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <string.h>
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+
+#include <sys/socket.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define VERDICT_REJECT 0
+#define VERDICT_PROCEED 1
+
+int port;
+
+SEC("cgroup/connect4")
+int connect_v4_dropper(struct bpf_sock_addr *ctx)
+{
+ if (ctx->type != SOCK_STREAM)
+ return VERDICT_PROCEED;
+ if (ctx->user_port == bpf_htons(port))
+ return VERDICT_REJECT;
+ return VERDICT_PROCEED;
+}
+
+char _license[] SEC("license") = "GPL";
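
For orientation (a sketch, not part of the patch): when a cgroup/connect4 program returns VERDICT_REJECT, connect() inside the cgroup fails with -EPERM. A minimal user-space driver, assuming the generated skeleton name connect4_dropper, would look roughly like this:

#include <errno.h>
#include <bpf/libbpf.h>
#include "connect4_dropper.skel.h"	/* generated from the program above */

/* Attach the dropper to @cgroup_fd so that TCP connects to @port are rejected. */
static struct connect4_dropper *attach_dropper(int cgroup_fd, int port,
					       struct bpf_link **link)
{
	struct connect4_dropper *skel;

	skel = connect4_dropper__open_and_load();
	if (!skel)
		return NULL;

	skel->bss->port = port;

	*link = bpf_program__attach_cgroup(skel->progs.connect_v4_dropper, cgroup_fd);
	if (!*link) {
		connect4_dropper__destroy(skel);
		return NULL;
	}

	/* A TCP connect() to @port from inside the cgroup is now expected to
	 * fail with -EPERM; connects to any other port still go through.
	 */
	return skel;
}
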
diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c
index a943d394fd3a..9e9ebf27b878 100644
--- a/tools/testing/selftests/bpf/progs/connect4_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect4_prog.c
@@ -7,8 +7,7 @@
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/in6.h>
-#include <sys/socket.h>
-#include <netinet/tcp.h>
+#include <linux/tcp.h>
#include <linux/if.h>
#include <errno.h>
@@ -31,9 +30,11 @@
#define IFNAMSIZ 16
#endif
-int _version SEC("version") = 1;
+#ifndef SOL_TCP
+#define SOL_TCP 6
+#endif
-__attribute__ ((noinline))
+__attribute__ ((noinline)) __weak
int do_bind(struct bpf_sock_addr *ctx)
{
struct sockaddr_in sa = {};
@@ -198,4 +199,10 @@ int connect_v4_prog(struct bpf_sock_addr *ctx)
return do_bind(ctx) ? 1 : 0;
}
+SEC("cgroup/connect4")
+int connect_v4_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/connect6_prog.c b/tools/testing/selftests/bpf/progs/connect6_prog.c
index 506d0f81a375..e98573b00ddb 100644
--- a/tools/testing/selftests/bpf/progs/connect6_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect6_prog.c
@@ -24,8 +24,6 @@
#define DST_REWRITE_PORT6 6666
-int _version SEC("version") = 1;
-
SEC("cgroup/connect6")
int connect_v6_prog(struct bpf_sock_addr *ctx)
{
@@ -92,4 +90,10 @@ int connect_v6_prog(struct bpf_sock_addr *ctx)
return 1;
}
+SEC("cgroup/connect6")
+int connect_v6_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/connect_force_port4.c b/tools/testing/selftests/bpf/progs/connect_force_port4.c
index 7396308677a3..27a632dd382e 100644
--- a/tools/testing/selftests/bpf/progs/connect_force_port4.c
+++ b/tools/testing/selftests/bpf/progs/connect_force_port4.c
@@ -10,8 +10,9 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include <bpf_sockopt_helpers.h>
+
char _license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
struct svc_addr {
__be32 addr;
@@ -58,6 +59,9 @@ int connect4(struct bpf_sock_addr *ctx)
SEC("cgroup/getsockname4")
int getsockname4(struct bpf_sock_addr *ctx)
{
+ if (!get_set_sk_priority(ctx))
+ return 1;
+
/* Expose local server as 1.2.3.4:60000 to client. */
if (ctx->user_port == bpf_htons(60123)) {
ctx->user_ip4 = bpf_htonl(0x01020304);
@@ -71,6 +75,9 @@ int getpeername4(struct bpf_sock_addr *ctx)
{
struct svc_addr *orig;
+ if (!get_set_sk_priority(ctx))
+ return 1;
+
/* Expose service 1.2.3.4:60000 as peer instead of backend. */
if (ctx->user_port == bpf_htons(60123)) {
orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0, 0);
diff --git a/tools/testing/selftests/bpf/progs/connect_force_port6.c b/tools/testing/selftests/bpf/progs/connect_force_port6.c
index c1a2b555e9ad..19cad93e612f 100644
--- a/tools/testing/selftests/bpf/progs/connect_force_port6.c
+++ b/tools/testing/selftests/bpf/progs/connect_force_port6.c
@@ -9,8 +9,9 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include <bpf_sockopt_helpers.h>
+
char _license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
struct svc_addr {
__be32 addr[4];
@@ -63,6 +64,9 @@ int connect6(struct bpf_sock_addr *ctx)
SEC("cgroup/getsockname6")
int getsockname6(struct bpf_sock_addr *ctx)
{
+ if (!get_set_sk_priority(ctx))
+ return 1;
+
/* Expose local server as [fc00::1]:60000 to client. */
if (ctx->user_port == bpf_htons(60124)) {
ctx->user_ip6[0] = bpf_htonl(0xfc000000);
@@ -79,6 +83,9 @@ int getpeername6(struct bpf_sock_addr *ctx)
{
struct svc_addr *orig;
+ if (!get_set_sk_priority(ctx))
+ return 1;
+
/* Expose service [fc00::1]:60000 as peer instead of backend. */
if (ctx->user_port == bpf_htons(60124)) {
orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0, 0);
diff --git a/tools/testing/selftests/bpf/progs/connect_ping.c b/tools/testing/selftests/bpf/progs/connect_ping.c
new file mode 100644
index 000000000000..60178192b672
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/connect_ping.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright 2022 Google LLC.
+ */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+
+/* 2001:db8::1 */
+#define BINDADDR_V6 { { { 0x20,0x01,0x0d,0xb8,0,0,0,0,0,0,0,0,0,0,0,1 } } }
+
+__u32 do_bind = 0;
+__u32 has_error = 0;
+__u32 invocations_v4 = 0;
+__u32 invocations_v6 = 0;
+
+SEC("cgroup/connect4")
+int connect_v4_prog(struct bpf_sock_addr *ctx)
+{
+ struct sockaddr_in sa = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = bpf_htonl(0x01010101),
+ };
+
+ __sync_fetch_and_add(&invocations_v4, 1);
+
+ if (do_bind && bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
+ has_error = 1;
+
+ return 1;
+}
+
+SEC("cgroup/connect6")
+int connect_v6_prog(struct bpf_sock_addr *ctx)
+{
+ struct sockaddr_in6 sa = {
+ .sin6_family = AF_INET6,
+ .sin6_addr = BINDADDR_V6,
+ };
+
+ __sync_fetch_and_add(&invocations_v6, 1);
+
+ if (do_bind && bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
+ has_error = 1;
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/connect_unix_prog.c b/tools/testing/selftests/bpf/progs/connect_unix_prog.c
new file mode 100644
index 000000000000..ba60adadb335
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/connect_unix_prog.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite";
+
+SEC("cgroup/connect_unix")
+int connect_unix_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);
+ struct sockaddr_un *sa_kern_unaddr;
+ __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) +
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1;
+ int ret;
+
+ /* Rewrite destination. */
+ ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1);
+ if (ret)
+ return 0;
+
+ if (sa_kern->uaddrlen != unaddrlen)
+ return 0;
+
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
+ if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
+ return 0;
+
+ return 1;
+}
+
+SEC("cgroup/connect_unix")
+int connect_unix_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/core_kern.c b/tools/testing/selftests/bpf/progs/core_kern.c
new file mode 100644
index 000000000000..004f2acef2eb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/core_kern.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+#define ATTR __always_inline
+#include "test_jhash.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u32);
+ __uint(max_entries, 256);
+} array1 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u32);
+ __uint(max_entries, 256);
+} array2 SEC(".maps");
+
+static __noinline int randmap(int v, const struct net_device *dev)
+{
+ struct bpf_map *map = (struct bpf_map *)&array1;
+ int key = bpf_get_prandom_u32() & 0xff;
+ int *val;
+
+ if (bpf_get_prandom_u32() & 1)
+ map = (struct bpf_map *)&array2;
+
+ val = bpf_map_lookup_elem(map, &key);
+ if (val)
+ *val = bpf_get_prandom_u32() + v + dev->mtu;
+
+ return 0;
+}
+
+SEC("tp_btf/xdp_devmap_xmit")
+int BPF_PROG(tp_xdp_devmap_xmit_multi, const struct net_device
+ *from_dev, const struct net_device *to_dev, int sent, int drops,
+ int err)
+{
+ return randmap(from_dev->ifindex, from_dev);
+}
+
+SEC("fentry/eth_type_trans")
+int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb,
+ struct net_device *dev, unsigned short protocol)
+{
+ return randmap(dev->ifindex + skb->len, dev);
+}
+
+SEC("fexit/eth_type_trans")
+int BPF_PROG(fexit_eth_type_trans, struct sk_buff *skb,
+ struct net_device *dev, unsigned short protocol)
+{
+ return randmap(dev->ifindex + skb->len, dev);
+}
+
+volatile const int never;
+
+struct __sk_bUfF /* it will not exist in vmlinux */ {
+ int len;
+} __attribute__((preserve_access_index));
+
+struct bpf_testmod_test_read_ctx /* it exists in bpf_testmod */ {
+ size_t len;
+} __attribute__((preserve_access_index));
+
+SEC("tc")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ void *ptr;
+ int nh_off, i = 0;
+
+ nh_off = 14;
+
+ /* pragma unroll doesn't work on large loops */
+#define C do { \
+ ptr = data + i; \
+ if (ptr + nh_off > data_end) \
+ break; \
+ ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
+ if (never) { \
+ /* below is dead code with an unresolvable CO-RE relo */ \
+ i += ((struct __sk_bUfF *)ctx)->len; \
+ /* this CO-RE relo may or may not resolve
+ * depending on whether bpf_testmod is loaded.
+ */ \
+ i += ((struct bpf_testmod_test_read_ctx *)ctx)->len; \
+ } \
+ } while (0);
+#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
+ C30;C30;C30; /* 90 calls */
+ return 0;
+}
+
+typedef int (*func_proto_typedef___match)(long);
+typedef int (*func_proto_typedef___doesnt_match)(char *);
+typedef int (*func_proto_typedef_nested1)(func_proto_typedef___match);
+
+int proto_out[3];
+
+SEC("raw_tracepoint/sys_enter")
+int core_relo_proto(void *ctx)
+{
+ proto_out[0] = bpf_core_type_exists(func_proto_typedef___match);
+ proto_out[1] = bpf_core_type_exists(func_proto_typedef___doesnt_match);
+ proto_out[2] = bpf_core_type_exists(func_proto_typedef_nested1);
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/core_kern_overflow.c b/tools/testing/selftests/bpf/progs/core_kern_overflow.c
new file mode 100644
index 000000000000..f0d5652256ba
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/core_kern_overflow.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+typedef int (*func_proto_typedef)(long);
+typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
+typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);
+
+int proto_out;
+
+SEC("raw_tracepoint/sys_enter")
+int core_relo_proto(void *ctx)
+{
+ proto_out = bpf_core_type_exists(func_proto_typedef_nested2);
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
index e6e616cb7bc9..5760ae015e09 100644
--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
+++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
@@ -13,6 +13,24 @@ struct core_reloc_kernel_output {
int valid[10];
char comm[sizeof("test_progs")];
int comm_len;
+ bool local_task_struct_matches;
+};
+
+/*
+ * MODULE
+ */
+
+struct core_reloc_module_output {
+ long long len;
+ long long off;
+ int read_ctx_sz;
+ bool read_ctx_exists;
+ bool buf_exists;
+ bool len_exists;
+ bool off_exists;
+ /* we have test_progs[-flavor], so cut flavor part */
+ char comm[sizeof("test_progs")];
+ int comm_len;
};
/*
@@ -329,6 +347,7 @@ struct core_reloc_nesting___err_too_deep {
*/
struct core_reloc_arrays_output {
int a2;
+ int a3;
char b123;
int c1c;
int d00d;
@@ -437,6 +456,15 @@ struct core_reloc_arrays___err_bad_zero_sz_arr {
struct core_reloc_arrays_substruct d[1][2];
};
+struct core_reloc_arrays___err_bad_signed_arr_elem_sz {
+ /* int -> short (signed!): not supported case */
+ short a[5];
+ char b[2][3][4];
+ struct core_reloc_arrays_substruct c[3];
+ struct core_reloc_arrays_substruct d[1][2];
+ struct core_reloc_arrays_substruct f[][2];
+};
+
/*
* PRIMITIVES
*/
@@ -683,27 +711,11 @@ struct core_reloc_existence___minimal {
int a;
};
-struct core_reloc_existence___err_wrong_int_sz {
- short a;
-};
-
-struct core_reloc_existence___err_wrong_int_type {
+struct core_reloc_existence___wrong_field_defs {
+ void *a;
int b[1];
-};
-
-struct core_reloc_existence___err_wrong_int_kind {
struct{ int x; } c;
-};
-
-struct core_reloc_existence___err_wrong_arr_kind {
int arr;
-};
-
-struct core_reloc_existence___err_wrong_arr_value_type {
- short arr[1];
-};
-
-struct core_reloc_existence___err_wrong_struct_type {
int s;
};
@@ -784,12 +796,21 @@ struct core_reloc_bitfields___err_too_big_bitfield {
*/
struct core_reloc_size_output {
int int_sz;
+ int int_off;
int struct_sz;
+ int struct_off;
int union_sz;
+ int union_off;
int arr_sz;
+ int arr_off;
int arr_elem_sz;
+ int arr_elem_off;
int ptr_sz;
+ int ptr_off;
int enum_sz;
+ int enum_off;
+ int float_sz;
+ int float_off;
};
struct core_reloc_size {
@@ -799,6 +820,7 @@ struct core_reloc_size {
int arr_field[4];
void *ptr_field;
enum { VALUE = 123 } enum_field;
+ float float_field;
};
struct core_reloc_size___diff_sz {
@@ -808,6 +830,17 @@ struct core_reloc_size___diff_sz {
char arr_field[10];
void *ptr_field;
enum { OTHER_VALUE = 0xFFFFFFFFFFFFFFFF } enum_field;
+ double float_field;
+};
+
+struct core_reloc_size___diff_offs {
+ float float_field;
+ enum { YET_OTHER_VALUE = 123 } enum_field;
+ void *ptr_field;
+ int arr_field[4];
+ union { int x; } union_field;
+ struct { int x; } struct_field;
+ int int_field;
};
/* Error case of two candidates with the fields (int_field) at the same
@@ -822,6 +855,7 @@ struct core_reloc_size___err_ambiguous1 {
int arr_field[4];
void *ptr_field;
enum { VALUE___1 = 123 } enum_field;
+ float float_field;
};
struct core_reloc_size___err_ambiguous2 {
@@ -833,13 +867,15 @@ struct core_reloc_size___err_ambiguous2 {
int arr_field[4];
void *ptr_field;
enum { VALUE___2 = 123 } enum_field;
+ float float_field;
};
/*
- * TYPE EXISTENCE & SIZE
+ * TYPE EXISTENCE, MATCH & SIZE
*/
struct core_reloc_type_based_output {
bool struct_exists;
+ bool complex_struct_exists;
bool union_exists;
bool enum_exists;
bool typedef_named_struct_exists;
@@ -848,9 +884,24 @@ struct core_reloc_type_based_output {
bool typedef_int_exists;
bool typedef_enum_exists;
bool typedef_void_ptr_exists;
+ bool typedef_restrict_ptr_exists;
bool typedef_func_proto_exists;
bool typedef_arr_exists;
+ bool struct_matches;
+ bool complex_struct_matches;
+ bool union_matches;
+ bool enum_matches;
+ bool typedef_named_struct_matches;
+ bool typedef_anon_struct_matches;
+ bool typedef_struct_ptr_matches;
+ bool typedef_int_matches;
+ bool typedef_enum_matches;
+ bool typedef_void_ptr_matches;
+ bool typedef_restrict_ptr_matches;
+ bool typedef_func_proto_matches;
+ bool typedef_arr_matches;
+
int struct_sz;
int union_sz;
int enum_sz;
@@ -868,6 +919,14 @@ struct a_struct {
int x;
};
+struct a_complex_struct {
+ union {
+ struct a_struct * restrict a;
+ void *b;
+ } x;
+ volatile long y;
+};
+
union a_union {
int y;
int z;
@@ -892,6 +951,7 @@ typedef int int_typedef;
typedef enum { TYPEDEF_ENUM_VAL1, TYPEDEF_ENUM_VAL2 } enum_typedef;
typedef void *void_ptr_typedef;
+typedef int *restrict restrict_ptr_typedef;
typedef int (*func_proto_typedef)(long);
@@ -899,22 +959,86 @@ typedef char arr_typedef[20];
struct core_reloc_type_based {
struct a_struct f1;
- union a_union f2;
- enum an_enum f3;
- named_struct_typedef f4;
- anon_struct_typedef f5;
- struct_ptr_typedef f6;
- int_typedef f7;
- enum_typedef f8;
- void_ptr_typedef f9;
- func_proto_typedef f10;
- arr_typedef f11;
+ struct a_complex_struct f2;
+ union a_union f3;
+ enum an_enum f4;
+ named_struct_typedef f5;
+ anon_struct_typedef f6;
+ struct_ptr_typedef f7;
+ int_typedef f8;
+ enum_typedef f9;
+ void_ptr_typedef f10;
+ restrict_ptr_typedef f11;
+ func_proto_typedef f12;
+ arr_typedef f13;
};
/* no types in target */
struct core_reloc_type_based___all_missing {
};
+/* different member orders, enum variant values, signedness, etc */
+struct a_struct___diff {
+ int x;
+ int a;
+};
+
+struct a_struct___forward;
+
+struct a_complex_struct___diff {
+ union {
+ struct a_struct___forward *a;
+ void *b;
+ } x;
+ volatile long y;
+};
+
+union a_union___diff {
+ int z;
+ int y;
+};
+
+typedef struct a_struct___diff named_struct_typedef___diff;
+
+typedef struct { int z, x, y; } anon_struct_typedef___diff;
+
+typedef struct {
+ int c;
+ int b;
+ int a;
+} *struct_ptr_typedef___diff;
+
+enum an_enum___diff {
+ AN_ENUM_VAL2___diff = 0,
+ AN_ENUM_VAL1___diff = 42,
+ AN_ENUM_VAL3___diff = 1,
+};
+
+typedef unsigned int int_typedef___diff;
+
+typedef enum { TYPEDEF_ENUM_VAL2___diff, TYPEDEF_ENUM_VAL1___diff = 50 } enum_typedef___diff;
+
+typedef const void *void_ptr_typedef___diff;
+
+typedef int_typedef___diff (*func_proto_typedef___diff)(long);
+
+typedef char arr_typedef___diff[3];
+
+struct core_reloc_type_based___diff {
+ struct a_struct___diff f1;
+ struct a_complex_struct___diff f2;
+ union a_union___diff f3;
+ enum an_enum___diff f4;
+ named_struct_typedef___diff f5;
+ anon_struct_typedef___diff f6;
+ struct_ptr_typedef___diff f7;
+ int_typedef___diff f8;
+ enum_typedef___diff f9;
+ void_ptr_typedef___diff f10;
+ func_proto_typedef___diff f11;
+ arr_typedef___diff f12;
+};
+
/* different type sizes, extra modifiers, anon vs named enums, etc */
struct a_struct___diff_sz {
long x;
@@ -1093,6 +1217,20 @@ struct core_reloc_enumval_output {
int anon_val2;
};
+struct core_reloc_enum64val_output {
+ bool unsigned_val1_exists;
+ bool unsigned_val2_exists;
+ bool unsigned_val3_exists;
+ bool signed_val1_exists;
+ bool signed_val2_exists;
+ bool signed_val3_exists;
+
+ long unsigned_val1;
+ long unsigned_val2;
+ long signed_val1;
+ long signed_val2;
+};
+
enum named_enum {
NAMED_ENUM_VAL1 = 1,
NAMED_ENUM_VAL2 = 2,
@@ -1110,6 +1248,23 @@ struct core_reloc_enumval {
anon_enum f2;
};
+enum named_unsigned_enum64 {
+ UNSIGNED_ENUM64_VAL1 = 0x1ffffffffULL,
+ UNSIGNED_ENUM64_VAL2 = 0x2,
+ UNSIGNED_ENUM64_VAL3 = 0x3ffffffffULL,
+};
+
+enum named_signed_enum64 {
+ SIGNED_ENUM64_VAL1 = 0x1ffffffffLL,
+ SIGNED_ENUM64_VAL2 = -2,
+ SIGNED_ENUM64_VAL3 = 0x3ffffffffLL,
+};
+
+struct core_reloc_enum64val {
+ enum named_unsigned_enum64 f1;
+ enum named_signed_enum64 f2;
+};
+
/* differing enumerator values */
enum named_enum___diff {
NAMED_ENUM_VAL1___diff = 101,
@@ -1128,6 +1283,23 @@ struct core_reloc_enumval___diff {
anon_enum___diff f2;
};
+enum named_unsigned_enum64___diff {
+ UNSIGNED_ENUM64_VAL1___diff = 0x101ffffffffULL,
+ UNSIGNED_ENUM64_VAL2___diff = 0x202ffffffffULL,
+ UNSIGNED_ENUM64_VAL3___diff = 0x303ffffffffULL,
+};
+
+enum named_signed_enum64___diff {
+ SIGNED_ENUM64_VAL1___diff = -101,
+ SIGNED_ENUM64_VAL2___diff = -202,
+ SIGNED_ENUM64_VAL3___diff = -303,
+};
+
+struct core_reloc_enum64val___diff {
+ enum named_unsigned_enum64___diff f1;
+ enum named_signed_enum64___diff f2;
+};
+
/* missing (optional) third enum value */
enum named_enum___val3_missing {
NAMED_ENUM_VAL1___val3_missing = 111,
@@ -1144,6 +1316,21 @@ struct core_reloc_enumval___val3_missing {
anon_enum___val3_missing f2;
};
+enum named_unsigned_enum64___val3_missing {
+ UNSIGNED_ENUM64_VAL1___val3_missing = 0x111ffffffffULL,
+ UNSIGNED_ENUM64_VAL2___val3_missing = 0x222,
+};
+
+enum named_signed_enum64___val3_missing {
+ SIGNED_ENUM64_VAL1___val3_missing = 0x111ffffffffLL,
+ SIGNED_ENUM64_VAL2___val3_missing = -222,
+};
+
+struct core_reloc_enum64val___val3_missing {
+ enum named_unsigned_enum64___val3_missing f1;
+ enum named_signed_enum64___val3_missing f2;
+};
+
/* missing (mandatory) second enum value, should fail */
enum named_enum___err_missing {
NAMED_ENUM_VAL1___err_missing = 1,
@@ -1159,3 +1346,18 @@ struct core_reloc_enumval___err_missing {
enum named_enum___err_missing f1;
anon_enum___err_missing f2;
};
+
+enum named_unsigned_enum64___err_missing {
+ UNSIGNED_ENUM64_VAL1___err_missing = 0x1ffffffffULL,
+ UNSIGNED_ENUM64_VAL3___err_missing = 0x3ffffffffULL,
+};
+
+enum named_signed_enum64___err_missing {
+ SIGNED_ENUM64_VAL1___err_missing = 0x1ffffffffLL,
+ SIGNED_ENUM64_VAL3___err_missing = -3,
+};
+
+struct core_reloc_enum64val___err_missing {
+ enum named_unsigned_enum64___err_missing f1;
+ enum named_signed_enum64___err_missing f2;
+};
diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h
new file mode 100644
index 000000000000..86085b79f5ca
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cpumask_common.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _CPUMASK_COMMON_H
+#define _CPUMASK_COMMON_H
+
+#include "errno.h"
+#include <stdbool.h>
+
+/* Should use BTF_FIELDS_MAX, but it is not always available in vmlinux.h,
+ * so use the hard-coded number as a workaround.
+ */
+#define CPUMASK_KPTR_FIELDS_MAX 11
+
+int err;
+
+#define private(name) SEC(".bss." #name) __attribute__((aligned(8)))
+private(MASK) static struct bpf_cpumask __kptr * global_mask;
+
+struct __cpumask_map_value {
+ struct bpf_cpumask __kptr * cpumask;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct __cpumask_map_value);
+ __uint(max_entries, 1);
+} __cpumask_map SEC(".maps");
+
+struct bpf_cpumask *bpf_cpumask_create(void) __ksym __weak;
+void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym __weak;
+struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym __weak;
+u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym __weak;
+u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym __weak;
+u32 bpf_cpumask_first_and(const struct cpumask *src1,
+ const struct cpumask *src2) __ksym __weak;
+void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_and(struct bpf_cpumask *cpumask,
+ const struct cpumask *src1,
+ const struct cpumask *src2) __ksym __weak;
+void bpf_cpumask_or(struct bpf_cpumask *cpumask,
+ const struct cpumask *src1,
+ const struct cpumask *src2) __ksym __weak;
+void bpf_cpumask_xor(struct bpf_cpumask *cpumask,
+ const struct cpumask *src1,
+ const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym __weak;
+u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym __weak;
+u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
+ const struct cpumask *src2) __ksym __weak;
+u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym __weak;
+int bpf_cpumask_populate(struct cpumask *cpumask, void *src, size_t src__sz) __ksym __weak;
+
+void bpf_rcu_read_lock(void) __ksym __weak;
+void bpf_rcu_read_unlock(void) __ksym __weak;
+
+static inline const struct cpumask *cast(struct bpf_cpumask *cpumask)
+{
+ return (const struct cpumask *)cpumask;
+}
+
+static inline struct bpf_cpumask *create_cpumask(void)
+{
+ struct bpf_cpumask *cpumask;
+
+ cpumask = bpf_cpumask_create();
+ if (!cpumask) {
+ err = 1;
+ return NULL;
+ }
+
+ if (!bpf_cpumask_empty(cast(cpumask))) {
+ err = 2;
+ bpf_cpumask_release(cpumask);
+ return NULL;
+ }
+
+ return cpumask;
+}
+
+static inline struct __cpumask_map_value *cpumask_map_value_lookup(void)
+{
+ u32 key = 0;
+
+ return bpf_map_lookup_elem(&__cpumask_map, &key);
+}
+
+static inline int cpumask_map_insert(struct bpf_cpumask *mask)
+{
+ struct __cpumask_map_value local, *v;
+ long status;
+ struct bpf_cpumask *old;
+ u32 key = 0;
+
+ local.cpumask = NULL;
+ status = bpf_map_update_elem(&__cpumask_map, &key, &local, 0);
+ if (status) {
+ bpf_cpumask_release(mask);
+ return status;
+ }
+
+ v = bpf_map_lookup_elem(&__cpumask_map, &key);
+ if (!v) {
+ bpf_cpumask_release(mask);
+ return -ENOENT;
+ }
+
+ old = bpf_kptr_xchg(&v->cpumask, mask);
+ if (old) {
+ bpf_cpumask_release(old);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+#endif /* _CPUMASK_COMMON_H */
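
Taken together, the helpers above support a create / insert-into-map / extract / release lifecycle for bpf_cpumask kptrs. A condensed, hypothetical sketch of that flow (illustrative only, not part of the patch; it assumes the same includes as the test programs below, i.e. vmlinux.h, bpf_tracing.h and bpf_helpers.h, plus this header):

/* Illustrative only: exercise the cpumask_common.h helpers end to end. */
SEC("tp_btf/task_newtask")
int BPF_PROG(cpumask_lifecycle_sketch, struct task_struct *task, u64 clone_flags)
{
	struct __cpumask_map_value *v;
	struct bpf_cpumask *mask;

	mask = create_cpumask();
	if (!mask)
		return 0;

	/* Ownership moves into the map; on error the helper releases it. */
	if (cpumask_map_insert(mask))
		return 0;

	v = cpumask_map_value_lookup();
	if (!v)
		return 0;

	/* Take the kptr back out of the map and drop the reference. */
	mask = bpf_kptr_xchg(&v->cpumask, NULL);
	if (mask)
		bpf_cpumask_release(mask);

	return 0;
}
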
diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c b/tools/testing/selftests/bpf/progs/cpumask_failure.c
new file mode 100644
index 000000000000..8a2fd596c8a3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#include "cpumask_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct kptr_nested_array_2 {
+ struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_array_1 {
+ /* Make btf_parse_fields() in map_create() return -E2BIG */
+ struct kptr_nested_array_2 d_2[CPUMASK_KPTR_FIELDS_MAX + 1];
+};
+
+struct kptr_nested_array {
+ struct kptr_nested_array_1 d_1;
+};
+
+private(MASK_NESTED) static struct kptr_nested_array global_mask_nested_arr;
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(task_newtask,
+ * TP_PROTO(struct task_struct *p, u64 clone_flags)
+ */
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Unreleased reference")
+int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ cpumask = create_cpumask();
+ __sink(cpumask);
+
+ /* cpumask is never released. */
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("NULL pointer passed to trusted arg0")
+int BPF_PROG(test_alloc_double_release, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ cpumask = create_cpumask();
+
+ /* cpumask is released twice. */
+ bpf_cpumask_release(cpumask);
+ bpf_cpumask_release(cpumask);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("must be referenced")
+int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ /* Can't acquire a non-struct bpf_cpumask. */
+ cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr);
+ __sink(cpumask);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("bpf_cpumask_set_cpu args#1 expected pointer to STRUCT bpf_cpumask")
+int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags)
+{
+ /* Can't set the CPU of a non-struct bpf_cpumask. */
+ bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Unreleased reference")
+int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+ struct __cpumask_map_value *v;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (cpumask_map_insert(cpumask))
+ return 0;
+
+ v = cpumask_map_value_lookup();
+ if (!v)
+ return 0;
+
+ cpumask = bpf_kptr_xchg(&v->cpumask, NULL);
+
+ /* cpumask is never released. */
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("NULL pointer passed to trusted arg0")
+int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags)
+{
+ /* NULL passed to KF_TRUSTED_ARGS kfunc. */
+ bpf_cpumask_empty(NULL);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("R2 must be a rcu pointer")
+int BPF_PROG(test_global_mask_out_of_rcu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local, *prev;
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ prev = bpf_kptr_xchg(&global_mask, local);
+ if (prev) {
+ bpf_cpumask_release(prev);
+ err = 3;
+ return 0;
+ }
+
+ bpf_rcu_read_lock();
+ local = global_mask;
+ if (!local) {
+ err = 4;
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ bpf_rcu_read_unlock();
+
+ /* RCU region is exited before calling KF_RCU kfunc. */
+
+ bpf_cpumask_test_cpu(0, (const struct cpumask *)local);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("NULL pointer passed to trusted arg1")
+int BPF_PROG(test_global_mask_no_null_check, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local, *prev;
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ prev = bpf_kptr_xchg(&global_mask, local);
+ if (prev) {
+ bpf_cpumask_release(prev);
+ err = 3;
+ return 0;
+ }
+
+ bpf_rcu_read_lock();
+ local = global_mask;
+
+ /* No NULL check is performed on global cpumask kptr. */
+ bpf_cpumask_test_cpu(0, (const struct cpumask *)local);
+
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to helper arg2")
+int BPF_PROG(test_global_mask_rcu_no_null_check, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *prev, *curr;
+
+ curr = bpf_cpumask_create();
+ if (!curr)
+ return 0;
+
+ prev = bpf_kptr_xchg(&global_mask, curr);
+ if (prev)
+ bpf_cpumask_release(prev);
+
+ bpf_rcu_read_lock();
+ curr = global_mask;
+ /* PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU passed to bpf_kptr_xchg() */
+ prev = bpf_kptr_xchg(&global_mask, curr);
+ bpf_rcu_read_unlock();
+ if (prev)
+ bpf_cpumask_release(prev);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("has no valid kptr")
+int BPF_PROG(test_invalid_nested_array, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local, *prev;
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ prev = bpf_kptr_xchg(&global_mask_nested_arr.d_1.d_2[CPUMASK_KPTR_FIELDS_MAX].mask, local);
+ if (prev) {
+ bpf_cpumask_release(prev);
+ err = 3;
+ return 0;
+ }
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("type=scalar expected=fp")
+int BPF_PROG(test_populate_invalid_destination, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *invalid = (struct bpf_cpumask *)0x123456;
+ u64 bits;
+ int ret;
+
+ ret = bpf_cpumask_populate((struct cpumask *)invalid, &bits, sizeof(bits));
+ if (!ret)
+ err = 2;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("leads to invalid memory access")
+int BPF_PROG(test_populate_invalid_source, struct task_struct *task, u64 clone_flags)
+{
+ void *garbage = (void *)0x123456;
+ struct bpf_cpumask *local;
+ int ret;
+
+ local = create_cpumask();
+ if (!local) {
+ err = 1;
+ return 0;
+ }
+
+ ret = bpf_cpumask_populate((struct cpumask *)local, garbage, 8);
+ if (!ret)
+ err = 2;
+
+ bpf_cpumask_release(local);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c
new file mode 100644
index 000000000000..0e04c31b91c0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cpumask_success.c
@@ -0,0 +1,890 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+#include "cpumask_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+int pid, nr_cpus;
+
+struct kptr_nested {
+ struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_pair {
+ struct bpf_cpumask __kptr * mask_1;
+ struct bpf_cpumask __kptr * mask_2;
+};
+
+struct kptr_nested_mid {
+ int dummy;
+ struct kptr_nested m;
+};
+
+struct kptr_nested_deep {
+ struct kptr_nested_mid ptrs[2];
+ struct kptr_nested_pair ptr_pairs[3];
+};
+
+struct kptr_nested_deep_array_1_2 {
+ int dummy;
+ struct bpf_cpumask __kptr * mask[CPUMASK_KPTR_FIELDS_MAX];
+};
+
+struct kptr_nested_deep_array_1_1 {
+ int dummy;
+ struct kptr_nested_deep_array_1_2 d_2;
+};
+
+struct kptr_nested_deep_array_1 {
+ long dummy;
+ struct kptr_nested_deep_array_1_1 d_1;
+};
+
+struct kptr_nested_deep_array_2_2 {
+ long dummy[2];
+ struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_deep_array_2_1 {
+ int dummy;
+ struct kptr_nested_deep_array_2_2 d_2[CPUMASK_KPTR_FIELDS_MAX];
+};
+
+struct kptr_nested_deep_array_2 {
+ long dummy;
+ struct kptr_nested_deep_array_2_1 d_1;
+};
+
+struct kptr_nested_deep_array_3_2 {
+ long dummy[2];
+ struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_deep_array_3_1 {
+ int dummy;
+ struct kptr_nested_deep_array_3_2 d_2;
+};
+
+struct kptr_nested_deep_array_3 {
+ long dummy;
+ struct kptr_nested_deep_array_3_1 d_1[CPUMASK_KPTR_FIELDS_MAX];
+};
+
+private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2];
+private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1];
+private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1];
+private(MASK) static struct kptr_nested global_mask_nested[2];
+private(MASK_DEEP) static struct kptr_nested_deep global_mask_nested_deep;
+private(MASK_1) static struct kptr_nested_deep_array_1 global_mask_nested_deep_array_1;
+private(MASK_2) static struct kptr_nested_deep_array_2 global_mask_nested_deep_array_2;
+private(MASK_3) static struct kptr_nested_deep_array_3 global_mask_nested_deep_array_3;
+
+static bool is_test_task(void)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+
+ return pid == cur_pid;
+}
+
+static bool create_cpumask_set(struct bpf_cpumask **out1,
+ struct bpf_cpumask **out2,
+ struct bpf_cpumask **out3,
+ struct bpf_cpumask **out4)
+{
+ struct bpf_cpumask *mask1, *mask2, *mask3, *mask4;
+
+ mask1 = create_cpumask();
+ if (!mask1)
+ return false;
+
+ mask2 = create_cpumask();
+ if (!mask2) {
+ bpf_cpumask_release(mask1);
+ err = 3;
+ return false;
+ }
+
+ mask3 = create_cpumask();
+ if (!mask3) {
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ err = 4;
+ return false;
+ }
+
+ mask4 = create_cpumask();
+ if (!mask4) {
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ bpf_cpumask_release(mask3);
+ err = 5;
+ return false;
+ }
+
+ *out1 = mask1;
+ *out2 = mask2;
+ *out3 = mask3;
+ *out4 = mask4;
+
+ return true;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_alloc_free_cpumask, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ if (!is_test_task())
+ return 0;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ bpf_cpumask_release(cpumask);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ if (!is_test_task())
+ return 0;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ bpf_cpumask_set_cpu(0, cpumask);
+ if (!bpf_cpumask_test_cpu(0, cast(cpumask))) {
+ err = 3;
+ goto release_exit;
+ }
+
+ bpf_cpumask_clear_cpu(0, cpumask);
+ if (bpf_cpumask_test_cpu(0, cast(cpumask))) {
+ err = 4;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(cpumask);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_setall_clear_cpu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ if (!is_test_task())
+ return 0;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ bpf_cpumask_setall(cpumask);
+ if (!bpf_cpumask_full(cast(cpumask))) {
+ err = 3;
+ goto release_exit;
+ }
+
+ bpf_cpumask_clear(cpumask);
+ if (!bpf_cpumask_empty(cast(cpumask))) {
+ err = 4;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(cpumask);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_first_firstzero_cpu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ if (!is_test_task())
+ return 0;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (bpf_cpumask_first(cast(cpumask)) < nr_cpus) {
+ err = 3;
+ goto release_exit;
+ }
+
+ if (bpf_cpumask_first_zero(cast(cpumask)) != 0) {
+ bpf_printk("first zero: %d", bpf_cpumask_first_zero(cast(cpumask)));
+ err = 4;
+ goto release_exit;
+ }
+
+ bpf_cpumask_set_cpu(0, cpumask);
+ if (bpf_cpumask_first(cast(cpumask)) != 0) {
+ err = 5;
+ goto release_exit;
+ }
+
+ if (bpf_cpumask_first_zero(cast(cpumask)) != 1) {
+ err = 6;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(cpumask);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_firstand_nocpu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask1, *mask2;
+ u32 first;
+
+ if (!is_test_task())
+ return 0;
+
+ mask1 = create_cpumask();
+ if (!mask1)
+ return 0;
+
+ mask2 = create_cpumask();
+ if (!mask2)
+ goto release_exit;
+
+ bpf_cpumask_set_cpu(0, mask1);
+ bpf_cpumask_set_cpu(1, mask2);
+
+ first = bpf_cpumask_first_and(cast(mask1), cast(mask2));
+ if (first <= 1)
+ err = 3;
+
+release_exit:
+ if (mask1)
+ bpf_cpumask_release(mask1);
+ if (mask2)
+ bpf_cpumask_release(mask2);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ if (!is_test_task())
+ return 0;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (bpf_cpumask_test_and_set_cpu(0, cpumask)) {
+ err = 3;
+ goto release_exit;
+ }
+
+ if (!bpf_cpumask_test_and_set_cpu(0, cpumask)) {
+ err = 4;
+ goto release_exit;
+ }
+
+ if (!bpf_cpumask_test_and_clear_cpu(0, cpumask)) {
+ err = 5;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(cpumask);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
+
+ if (!is_test_task())
+ return 0;
+
+ if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
+ return 0;
+
+ bpf_cpumask_set_cpu(0, mask1);
+ bpf_cpumask_set_cpu(1, mask2);
+
+ if (bpf_cpumask_and(dst1, cast(mask1), cast(mask2))) {
+ err = 6;
+ goto release_exit;
+ }
+ if (!bpf_cpumask_empty(cast(dst1))) {
+ err = 7;
+ goto release_exit;
+ }
+
+ bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
+ if (!bpf_cpumask_test_cpu(0, cast(dst1))) {
+ err = 8;
+ goto release_exit;
+ }
+ if (!bpf_cpumask_test_cpu(1, cast(dst1))) {
+ err = 9;
+ goto release_exit;
+ }
+
+ bpf_cpumask_xor(dst2, cast(mask1), cast(mask2));
+ if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
+ err = 10;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ bpf_cpumask_release(dst1);
+ bpf_cpumask_release(dst2);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_intersects_subset, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
+
+ if (!is_test_task())
+ return 0;
+
+ if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
+ return 0;
+
+ bpf_cpumask_set_cpu(0, mask1);
+ bpf_cpumask_set_cpu(1, mask2);
+ if (bpf_cpumask_intersects(cast(mask1), cast(mask2))) {
+ err = 6;
+ goto release_exit;
+ }
+
+ bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
+ if (!bpf_cpumask_subset(cast(mask1), cast(dst1))) {
+ err = 7;
+ goto release_exit;
+ }
+
+ if (!bpf_cpumask_subset(cast(mask2), cast(dst1))) {
+ err = 8;
+ goto release_exit;
+ }
+
+ if (bpf_cpumask_subset(cast(dst1), cast(mask1))) {
+ err = 9;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ bpf_cpumask_release(dst1);
+ bpf_cpumask_release(dst2);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
+ int cpu;
+
+ if (!is_test_task())
+ return 0;
+
+ if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
+ return 0;
+
+ bpf_cpumask_set_cpu(0, mask1);
+ bpf_cpumask_set_cpu(1, mask2);
+ bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
+
+ cpu = bpf_cpumask_any_distribute(cast(mask1));
+ if (cpu != 0) {
+ err = 6;
+ goto release_exit;
+ }
+
+ cpu = bpf_cpumask_any_distribute(cast(dst2));
+ if (cpu < nr_cpus) {
+ err = 7;
+ goto release_exit;
+ }
+
+ bpf_cpumask_copy(dst2, cast(dst1));
+ if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
+ err = 8;
+ goto release_exit;
+ }
+
+ cpu = bpf_cpumask_any_distribute(cast(dst2));
+ if (cpu > 1) {
+ err = 9;
+ goto release_exit;
+ }
+
+ cpu = bpf_cpumask_any_and_distribute(cast(mask1), cast(mask2));
+ if (cpu < nr_cpus) {
+ err = 10;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ bpf_cpumask_release(dst1);
+ bpf_cpumask_release(dst2);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (cpumask_map_insert(cpumask))
+ err = 3;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+ struct __cpumask_map_value *v;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (cpumask_map_insert(cpumask)) {
+ err = 3;
+ return 0;
+ }
+
+ v = cpumask_map_value_lookup();
+ if (!v) {
+ err = 4;
+ return 0;
+ }
+
+ cpumask = bpf_kptr_xchg(&v->cpumask, NULL);
+ if (cpumask)
+ bpf_cpumask_release(cpumask);
+ else
+ err = 5;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local, *prev;
+
+ if (!is_test_task())
+ return 0;
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ prev = bpf_kptr_xchg(&global_mask, local);
+ if (prev) {
+ bpf_cpumask_release(prev);
+ err = 3;
+ return 0;
+ }
+
+ bpf_rcu_read_lock();
+ local = global_mask;
+ if (!local) {
+ err = 4;
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ bpf_cpumask_test_cpu(0, (const struct cpumask *)local);
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_array_one_rcu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local, *prev;
+
+ if (!is_test_task())
+ return 0;
+
+ /* Kptr arrays with one element are special cased, being treated
+ * just like a single pointer.
+ */
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ prev = bpf_kptr_xchg(&global_mask_array_one[0], local);
+ if (prev) {
+ bpf_cpumask_release(prev);
+ err = 3;
+ return 0;
+ }
+
+ bpf_rcu_read_lock();
+ local = global_mask_array_one[0];
+ if (!local) {
+ err = 4;
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+static int _global_mask_array_rcu(struct bpf_cpumask **mask0,
+ struct bpf_cpumask **mask1)
+{
+ struct bpf_cpumask *local;
+
+ if (!is_test_task())
+ return 0;
+
+ /* Check that two kptrs in the array work correctly and independently */
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ bpf_rcu_read_lock();
+
+ local = bpf_kptr_xchg(mask0, local);
+ if (local) {
+ err = 1;
+ goto err_exit;
+ }
+
+ /* [<mask 0>, *] */
+ if (!*mask0) {
+ err = 2;
+ goto err_exit;
+ }
+
+ if (!mask1)
+ goto err_exit;
+
+ /* [*, NULL] */
+ if (*mask1) {
+ err = 3;
+ goto err_exit;
+ }
+
+ local = create_cpumask();
+ if (!local) {
+ err = 9;
+ goto err_exit;
+ }
+
+ local = bpf_kptr_xchg(mask1, local);
+ if (local) {
+ err = 10;
+ goto err_exit;
+ }
+
+ /* [<mask 0>, <mask 1>] */
+ if (!*mask0 || !*mask1 || *mask0 == *mask1) {
+ err = 11;
+ goto err_exit;
+ }
+
+err_exit:
+ if (local)
+ bpf_cpumask_release(local);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_array_rcu, struct task_struct *task, u64 clone_flags)
+{
+ return _global_mask_array_rcu(&global_mask_array[0], &global_mask_array[1]);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_array_l2_rcu, struct task_struct *task, u64 clone_flags)
+{
+ return _global_mask_array_rcu(&global_mask_array_l2[0][0], &global_mask_array_l2[1][0]);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_nested_rcu, struct task_struct *task, u64 clone_flags)
+{
+ return _global_mask_array_rcu(&global_mask_nested[0].mask, &global_mask_nested[1].mask);
+}
+
+/* Ensure that the field->offset has been correctly advanced from one
+ * nested struct or array sub-tree to another. In the case of
+ * kptr_nested_deep, it comprises two sub-trees: kptr_1 and kptr_2. By
+ * calling bpf_kptr_xchg() on every single kptr in both nested sub-trees,
+ * the verifier should reject the program if the field->offset of any kptr
+ * is incorrect.
+ *
+ * For instance, if we have 10 kptrs in a nested struct and a program that
+ * accesses each kptr individually with bpf_kptr_xchg(), the compiler
+ * should emit instructions to access 10 different offsets if it works
+ * correctly. If the field->offset values of any pair of them are
+ * incorrectly the same, the number of unique offsets in btf_record for
+ * this nested struct will be less than 10, and the verifier will fail to
+ * find a matching btf_field for some of the offsets emitted by the compiler.
+ *
+ * Even if the field->offset values of the kptrs are not duplicated, the
+ * verifier will still fail to find a btf_field for an instruction accessing
+ * a kptr if the corresponding field->offset points to a random, incorrect
+ * offset.
+ */
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags)
+{
+ int r, i;
+
+ r = _global_mask_array_rcu(&global_mask_nested_deep.ptrs[0].m.mask,
+ &global_mask_nested_deep.ptrs[1].m.mask);
+ if (r)
+ return r;
+
+ for (i = 0; i < 3; i++) {
+ r = _global_mask_array_rcu(&global_mask_nested_deep.ptr_pairs[i].mask_1,
+ &global_mask_nested_deep.ptr_pairs[i].mask_2);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_nested_deep_array_rcu, struct task_struct *task, u64 clone_flags)
+{
+ int i;
+
+ for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
+ _global_mask_array_rcu(&global_mask_nested_deep_array_1.d_1.d_2.mask[i], NULL);
+
+ for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
+ _global_mask_array_rcu(&global_mask_nested_deep_array_2.d_1.d_2[i].mask, NULL);
+
+ for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
+ _global_mask_array_rcu(&global_mask_nested_deep_array_3.d_1[i].d_2.mask, NULL);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local;
+
+ if (!is_test_task())
+ return 0;
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ if (bpf_cpumask_weight(cast(local)) != 0) {
+ err = 3;
+ goto out;
+ }
+
+ bpf_cpumask_set_cpu(0, local);
+ if (bpf_cpumask_weight(cast(local)) != 1) {
+ err = 4;
+ goto out;
+ }
+
+ /*
+ * Make sure that adding additional CPUs changes the weight. Only check
+ * the weight if the CPU was actually set, to account for running on UP
+ * machines.
+ */
+ bpf_cpumask_set_cpu(1, local);
+ if (bpf_cpumask_test_cpu(1, cast(local)) && bpf_cpumask_weight(cast(local)) != 2) {
+ err = 5;
+ goto out;
+ }
+
+ bpf_cpumask_clear(local);
+ if (bpf_cpumask_weight(cast(local)) != 0) {
+ err = 6;
+ goto out;
+ }
+out:
+ bpf_cpumask_release(local);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask1, *mask2;
+
+ mask1 = bpf_cpumask_create();
+ mask2 = bpf_cpumask_create();
+
+ if (!mask1 || !mask2)
+ goto free_masks_return;
+
+ bpf_cpumask_test_cpu(0, (const struct cpumask *)mask1);
+ bpf_cpumask_test_cpu(0, (const struct cpumask *)mask2);
+
+free_masks_return:
+ if (mask1)
+ bpf_cpumask_release(mask1);
+ if (mask2)
+ bpf_cpumask_release(mask2);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_populate_reject_small_mask, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local;
+ u8 toofewbits;
+ int ret;
+
+ if (!is_test_task())
+ return 0;
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ /* The kfunc should prevent this operation */
+ ret = bpf_cpumask_populate((struct cpumask *)local, &toofewbits, sizeof(toofewbits));
+ if (ret != -EACCES)
+ err = 2;
+
+ bpf_cpumask_release(local);
+
+ return 0;
+}
+
+/* Mask is guaranteed to be large enough for bpf_cpumask_t. */
+#define CPUMASK_TEST_MASKLEN (sizeof(cpumask_t))
+
+/* Add an extra word for the test_populate_reject_unaligned test. */
+u64 bits[CPUMASK_TEST_MASKLEN / 8 + 1];
+extern bool CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS __kconfig __weak;
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_populate_reject_unaligned, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask;
+ char *src;
+ int ret;
+
+ if (!is_test_task())
+ return 0;
+
+ /* Skip if unaligned accesses are fine for this arch. */
+ if (CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return 0;
+
+ mask = bpf_cpumask_create();
+ if (!mask) {
+ err = 1;
+ return 0;
+ }
+
+ /* Misalign the source array by a byte. */
+ src = &((char *)bits)[1];
+
+ ret = bpf_cpumask_populate((struct cpumask *)mask, src, CPUMASK_TEST_MASKLEN);
+ if (ret != -EINVAL)
+ err = 2;
+
+ bpf_cpumask_release(mask);
+
+ return 0;
+}
+
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_populate, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask;
+ bool bit;
+ int ret;
+ int i;
+
+ if (!is_test_task())
+ return 0;
+
+ /* Set only odd bits. */
+ __builtin_memset(bits, 0xaa, CPUMASK_TEST_MASKLEN);
+
+ mask = bpf_cpumask_create();
+ if (!mask) {
+ err = 1;
+ return 0;
+ }
+
+ /* Pass the entire bits array; the kfunc will only copy the valid bits. */
+ ret = bpf_cpumask_populate((struct cpumask *)mask, bits, CPUMASK_TEST_MASKLEN);
+ if (ret) {
+ err = 2;
+ goto out;
+ }
+
+ /*
+ * This check is here to appease the verifier. We cannot directly
+ * access NR_CPUS, the upper bound for nr_cpus, so we infer
+ * it from the size of cpumask_t.
+ */
+ if (nr_cpus < 0 || nr_cpus >= CPUMASK_TEST_MASKLEN * 8) {
+ err = 3;
+ goto out;
+ }
+
+ bpf_for(i, 0, nr_cpus) {
+ /* Odd-numbered bits should be set, even ones unset. */
+ bit = bpf_cpumask_test_cpu(i, (const struct cpumask *)mask);
+ if (bit == (i % 2 != 0))
+ continue;
+
+ err = 4;
+ break;
+ }
+
+out:
+ bpf_cpumask_release(mask);
+
+ return 0;
+}
+
+#undef CPUMASK_TEST_MASKLEN
diff --git a/tools/testing/selftests/bpf/progs/crypto_basic.c b/tools/testing/selftests/bpf/progs/crypto_basic.c
new file mode 100644
index 000000000000..8cf7168b42d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/crypto_basic.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+#include "crypto_common.h"
+
+int status;
+SEC("syscall")
+int crypto_release(void *ctx)
+{
+ struct bpf_crypto_params params = {
+ .type = "skcipher",
+ .algo = "ecb(aes)",
+ .key_len = 16,
+ };
+
+ struct bpf_crypto_ctx *cctx;
+ int err = 0;
+
+ status = 0;
+
+ cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
+
+ if (!cctx) {
+ status = err;
+ return 0;
+ }
+
+ bpf_crypto_ctx_release(cctx);
+
+ return 0;
+}
+
+SEC("syscall")
+__failure __msg("Unreleased reference")
+int crypto_acquire(void *ctx)
+{
+ struct bpf_crypto_params params = {
+ .type = "skcipher",
+ .algo = "ecb(aes)",
+ .key_len = 16,
+ };
+ struct bpf_crypto_ctx *cctx;
+ int err = 0;
+
+ status = 0;
+
+ cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
+
+ if (!cctx) {
+ status = err;
+ return 0;
+ }
+
+ cctx = bpf_crypto_ctx_acquire(cctx);
+ if (!cctx)
+ return -EINVAL;
+
+ bpf_crypto_ctx_release(cctx);
+
+ return 0;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/crypto_bench.c b/tools/testing/selftests/bpf/progs/crypto_bench.c
new file mode 100644
index 000000000000..4ac956b26240
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/crypto_bench.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+#include "crypto_common.h"
+
+const volatile unsigned int len = 16;
+char cipher[128] = {};
+u32 key_len, authsize;
+char dst[256] = {};
+u8 key[256] = {};
+long hits = 0;
+int status;
+
+SEC("syscall")
+int crypto_setup(void *args)
+{
+ struct bpf_crypto_ctx *cctx;
+ struct bpf_crypto_params params = {
+ .type = "skcipher",
+ .key_len = key_len,
+ .authsize = authsize,
+ };
+ int err = 0;
+
+ status = 0;
+
+ if (!cipher[0] || !key_len || key_len > 256) {
+ status = -EINVAL;
+ return 0;
+ }
+
+ __builtin_memcpy(&params.algo, cipher, sizeof(cipher));
+ __builtin_memcpy(&params.key, key, sizeof(key));
+ cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
+
+ if (!cctx) {
+ status = err;
+ return 0;
+ }
+
+ err = crypto_ctx_insert(cctx);
+ if (err && err != -EEXIST)
+ status = err;
+
+ return 0;
+}
+
+SEC("tc")
+int crypto_encrypt(struct __sk_buff *skb)
+{
+ struct __crypto_ctx_value *v;
+ struct bpf_crypto_ctx *ctx;
+ struct bpf_dynptr psrc, pdst;
+
+ v = crypto_ctx_value_lookup();
+ if (!v) {
+ status = -ENOENT;
+ return 0;
+ }
+
+ ctx = v->ctx;
+ if (!ctx) {
+ status = -ENOENT;
+ return 0;
+ }
+
+ bpf_dynptr_from_skb(skb, 0, &psrc);
+ bpf_dynptr_from_mem(dst, len, 0, &pdst);
+
+ status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);
+ __sync_add_and_fetch(&hits, 1);
+
+ return 0;
+}
+
+SEC("tc")
+int crypto_decrypt(struct __sk_buff *skb)
+{
+ struct bpf_dynptr psrc, pdst;
+ struct __crypto_ctx_value *v;
+ struct bpf_crypto_ctx *ctx;
+
+ v = crypto_ctx_value_lookup();
+ if (!v)
+ return -ENOENT;
+
+ ctx = v->ctx;
+ if (!ctx)
+ return -ENOENT;
+
+ bpf_dynptr_from_skb(skb, 0, &psrc);
+ bpf_dynptr_from_mem(dst, len, 0, &pdst);
+
+ status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL);
+ __sync_add_and_fetch(&hits, 1);
+
+ return 0;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/crypto_common.h b/tools/testing/selftests/bpf/progs/crypto_common.h
new file mode 100644
index 000000000000..57dd7a68a8c3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/crypto_common.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _CRYPTO_COMMON_H
+#define _CRYPTO_COMMON_H
+
+#include "errno.h"
+#include <stdbool.h>
+
+struct bpf_crypto_ctx *bpf_crypto_ctx_create(const struct bpf_crypto_params *params,
+ u32 params__sz, int *err) __ksym;
+struct bpf_crypto_ctx *bpf_crypto_ctx_acquire(struct bpf_crypto_ctx *ctx) __ksym;
+void bpf_crypto_ctx_release(struct bpf_crypto_ctx *ctx) __ksym;
+int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx, const struct bpf_dynptr *src,
+ const struct bpf_dynptr *dst, const struct bpf_dynptr *iv) __ksym;
+int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx, const struct bpf_dynptr *src,
+ const struct bpf_dynptr *dst, const struct bpf_dynptr *iv) __ksym;
+
+struct __crypto_ctx_value {
+ struct bpf_crypto_ctx __kptr * ctx;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct __crypto_ctx_value);
+ __uint(max_entries, 1);
+} __crypto_ctx_map SEC(".maps");
+
+static inline struct __crypto_ctx_value *crypto_ctx_value_lookup(void)
+{
+ u32 key = 0;
+
+ return bpf_map_lookup_elem(&__crypto_ctx_map, &key);
+}
+
+static inline int crypto_ctx_insert(struct bpf_crypto_ctx *ctx)
+{
+ struct __crypto_ctx_value local, *v;
+ struct bpf_crypto_ctx *old;
+ u32 key = 0;
+ int err;
+
+ local.ctx = NULL;
+ err = bpf_map_update_elem(&__crypto_ctx_map, &key, &local, 0);
+ if (err) {
+ bpf_crypto_ctx_release(ctx);
+ return err;
+ }
+
+ v = bpf_map_lookup_elem(&__crypto_ctx_map, &key);
+ if (!v) {
+ bpf_crypto_ctx_release(ctx);
+ return -ENOENT;
+ }
+
+ old = bpf_kptr_xchg(&v->ctx, ctx);
+ if (old) {
+ bpf_crypto_ctx_release(old);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+#endif /* _CRYPTO_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/crypto_sanity.c b/tools/testing/selftests/bpf/progs/crypto_sanity.c
new file mode 100644
index 000000000000..645be6cddf36
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/crypto_sanity.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+#include "crypto_common.h"
+
+unsigned char key[256] = {};
+u16 udp_test_port = 7777;
+u32 authsize, key_len;
+char algo[128] = {};
+char dst[16] = {};
+int status;
+
+static int skb_dynptr_validate(struct __sk_buff *skb, struct bpf_dynptr *psrc)
+{
+ struct ipv6hdr ip6h;
+ struct udphdr udph;
+ u32 offset;
+
+ if (skb->protocol != __bpf_constant_htons(ETH_P_IPV6))
+ return -1;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h)))
+ return -1;
+
+ if (ip6h.nexthdr != IPPROTO_UDP)
+ return -1;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(ip6h), &udph, sizeof(udph)))
+ return -1;
+
+ if (udph.dest != __bpf_htons(udp_test_port))
+ return -1;
+
+ offset = ETH_HLEN + sizeof(ip6h) + sizeof(udph);
+ if (skb->len < offset + 16)
+ return -1;
+
+ /* let's make sure that 16 bytes of payload are in the linear part of skb */
+ bpf_skb_pull_data(skb, offset + 16);
+ bpf_dynptr_from_skb(skb, 0, psrc);
+ bpf_dynptr_adjust(psrc, offset, offset + 16);
+
+ return 0;
+}
+
+SEC("syscall")
+int skb_crypto_setup(void *ctx)
+{
+ struct bpf_crypto_params params = {
+ .type = "skcipher",
+ .key_len = key_len,
+ .authsize = authsize,
+ };
+ struct bpf_crypto_ctx *cctx;
+ int err = 0;
+
+ status = 0;
+
+ if (key_len > 256) {
+ status = -EINVAL;
+ return 0;
+ }
+
+ __builtin_memcpy(&params.algo, algo, sizeof(algo));
+ __builtin_memcpy(&params.key, key, sizeof(key));
+ cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
+
+ if (!cctx) {
+ status = err;
+ return 0;
+ }
+
+ err = crypto_ctx_insert(cctx);
+ if (err && err != -EEXIST)
+ status = err;
+
+ return 0;
+}
+
+SEC("tc")
+int decrypt_sanity(struct __sk_buff *skb)
+{
+ struct __crypto_ctx_value *v;
+ struct bpf_crypto_ctx *ctx;
+ struct bpf_dynptr psrc, pdst;
+ int err;
+
+ err = skb_dynptr_validate(skb, &psrc);
+ if (err < 0) {
+ status = err;
+ return TC_ACT_SHOT;
+ }
+
+ v = crypto_ctx_value_lookup();
+ if (!v) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+ ctx = v->ctx;
+ if (!ctx) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+ /* dst is a global variable to make it easier for the test to check the
+ * result. In real production code, a percpu map should be used to store
+ * the result.
+ */
+ bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
+
+ status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL);
+
+ return TC_ACT_SHOT;
+}
+
+SEC("tc")
+int encrypt_sanity(struct __sk_buff *skb)
+{
+ struct __crypto_ctx_value *v;
+ struct bpf_crypto_ctx *ctx;
+ struct bpf_dynptr psrc, pdst;
+ int err;
+
+ status = 0;
+
+ err = skb_dynptr_validate(skb, &psrc);
+ if (err < 0) {
+ status = err;
+ return TC_ACT_SHOT;
+ }
+
+ v = crypto_ctx_value_lookup();
+ if (!v) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+ ctx = v->ctx;
+ if (!ctx) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+ /* dst is a global variable to make it easier for the test to check the
+ * result. In real production code, a percpu map should be used to store
+ * the result.
+ */
+ bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
+
+ status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);
+
+ return TC_ACT_SHOT;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/csum_diff_test.c b/tools/testing/selftests/bpf/progs/csum_diff_test.c
new file mode 100644
index 000000000000..9438f1773a58
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/csum_diff_test.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates */
+#include <linux/types.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define BUFF_SZ 512
+
+/* Will be updated by benchmark before program loading */
+char to_buff[BUFF_SZ];
+const volatile unsigned int to_buff_len = 0;
+char from_buff[BUFF_SZ];
+const volatile unsigned int from_buff_len = 0;
+unsigned short seed = 0;
+
+short result;
+
+char _license[] SEC("license") = "GPL";
+
+SEC("tc")
+int compute_checksum(void *ctx)
+{
+ int to_len_half = to_buff_len / 2;
+ int from_len_half = from_buff_len / 2;
+ short result2;
+
+ /* Calculate checksum in one go */
+ result2 = bpf_csum_diff((void *)from_buff, from_buff_len,
+ (void *)to_buff, to_buff_len, seed);
+
+ /* Calculate checksum by chaining two bpf_csum_diff() calls */
+ result = bpf_csum_diff((void *)from_buff, from_buff_len - from_len_half,
+ (void *)to_buff, to_buff_len - to_len_half, seed);
+
+ result = bpf_csum_diff((void *)from_buff + (from_buff_len - from_len_half), from_len_half,
+ (void *)to_buff + (to_buff_len - to_len_half), to_len_half, result);
+
+ result = (result == result2) ? result : 0;
+
+ return 0;
+}
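
The equivalence the program checks comes from one's-complement arithmetic: folding a partial sum and feeding it back as the seed of the next call does not change the final checksum. A small user-space sketch of that property (plain C, no BPF helpers; the buffer contents and the 256-byte split point are arbitrary choices for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit running sum into 16 bits (one's-complement add). */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* One's-complement sum of a buffer, seeded with a prior partial sum. */
static uint16_t csum_partial(const void *buf, size_t len, uint16_t seed)
{
	const uint16_t *p = buf;
	uint32_t sum = seed;

	while (len > 1) {
		sum += *p++;
		len -= 2;
	}
	if (len)
		sum += *(const uint8_t *)p;
	return csum_fold(sum);
}

int main(void)
{
	uint8_t buf[512];

	for (size_t i = 0; i < sizeof(buf); i++)
		buf[i] = (uint8_t)i;

	/* One pass vs. two chained passes over the same data. */
	uint16_t whole = csum_partial(buf, sizeof(buf), 0);
	uint16_t first = csum_partial(buf, 256, 0);
	uint16_t both  = csum_partial(buf + 256, 256, first);

	assert(whole == both);
	printf("0x%04x == 0x%04x\n", whole, both);
	return 0;
}
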
diff --git a/tools/testing/selftests/bpf/progs/decap_sanity.c b/tools/testing/selftests/bpf/progs/decap_sanity.c
new file mode 100644
index 000000000000..bd3c657c58a7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/decap_sanity.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define UDP_TEST_PORT 7777
+
+void *bpf_cast_to_kern_ctx(void *) __ksym;
+bool init_csum_partial = false;
+bool final_csum_none = false;
+bool broken_csum_start = false;
+
+static unsigned int skb_headlen(const struct sk_buff *skb)
+{
+ return skb->len - skb->data_len;
+}
+
+static unsigned int skb_headroom(const struct sk_buff *skb)
+{
+ return skb->data - skb->head;
+}
+
+static int skb_checksum_start_offset(const struct sk_buff *skb)
+{
+ return skb->csum_start - skb_headroom(skb);
+}
+
+SEC("tc")
+int decap_sanity(struct __sk_buff *skb)
+{
+ struct sk_buff *kskb;
+ struct ipv6hdr ip6h;
+ struct udphdr udph;
+ int err;
+
+ if (skb->protocol != __bpf_constant_htons(ETH_P_IPV6))
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h)))
+ return TC_ACT_SHOT;
+
+ if (ip6h.nexthdr != IPPROTO_UDP)
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(ip6h), &udph, sizeof(udph)))
+ return TC_ACT_SHOT;
+
+ if (udph.dest != __bpf_constant_htons(UDP_TEST_PORT))
+ return TC_ACT_SHOT;
+
+ kskb = bpf_cast_to_kern_ctx(skb);
+ init_csum_partial = (kskb->ip_summed == CHECKSUM_PARTIAL);
+ err = bpf_skb_adjust_room(skb, -(s32)(ETH_HLEN + sizeof(ip6h) + sizeof(udph)),
+ 1, BPF_F_ADJ_ROOM_FIXED_GSO);
+ if (err)
+ return TC_ACT_SHOT;
+ final_csum_none = (kskb->ip_summed == CHECKSUM_NONE);
+ if (kskb->ip_summed == CHECKSUM_PARTIAL &&
+ (unsigned int)skb_checksum_start_offset(kskb) >= skb_headlen(kskb))
+ broken_csum_start = true;
+
+ return TC_ACT_SHOT;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/dev_cgroup.c b/tools/testing/selftests/bpf/progs/dev_cgroup.c
index 8924e06bdef0..c1dfbd2b56fc 100644
--- a/tools/testing/selftests/bpf/progs/dev_cgroup.c
+++ b/tools/testing/selftests/bpf/progs/dev_cgroup.c
@@ -41,14 +41,14 @@ int bpf_prog1(struct bpf_cgroup_dev_ctx *ctx)
bpf_trace_printk(fmt, sizeof(fmt), ctx->major, ctx->minor);
#endif
- /* Allow access to /dev/zero and /dev/random.
+ /* Allow access to /dev/null and /dev/urandom.
* Forbid everything else.
*/
if (ctx->major != 1 || type != BPF_DEVCG_DEV_CHAR)
return 0;
switch (ctx->minor) {
- case 5: /* 1:5 /dev/zero */
+ case 3: /* 1:3 /dev/null */
case 9: /* 1:9 /dev/urandom */
return 1;
}
@@ -57,4 +57,3 @@ int bpf_prog1(struct bpf_cgroup_dev_ctx *ctx)
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/tools/testing/selftests/bpf/progs/dmabuf_iter.c b/tools/testing/selftests/bpf/progs/dmabuf_iter.c
new file mode 100644
index 000000000000..13cdb11fdeb2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/dmabuf_iter.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Google LLC */
+#include <vmlinux.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+
+/* From uapi/linux/dma-buf.h */
+#define DMA_BUF_NAME_LEN 32
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, DMA_BUF_NAME_LEN);
+ __type(value, bool);
+ __uint(max_entries, 5);
+} testbuf_hash SEC(".maps");
+
+/*
+ * Fields output by this iterator are delimited by newlines. Convert any
+ * newlines in user-provided printed strings to spaces.
+ */
+static void sanitize_string(char *src, size_t size)
+{
+ for (char *c = src; (size_t)(c - src) < size && *c; ++c)
+ if (*c == '\n')
+ *c = ' ';
+}
+
+SEC("iter/dmabuf")
+int dmabuf_collector(struct bpf_iter__dmabuf *ctx)
+{
+ const struct dma_buf *dmabuf = ctx->dmabuf;
+ struct seq_file *seq = ctx->meta->seq;
+ unsigned long inode = 0;
+ size_t size;
+ const char *pname, *exporter;
+ char name[DMA_BUF_NAME_LEN] = {'\0'};
+
+ if (!dmabuf)
+ return 0;
+
+ if (BPF_CORE_READ_INTO(&inode, dmabuf, file, f_inode, i_ino) ||
+ bpf_core_read(&size, sizeof(size), &dmabuf->size) ||
+ bpf_core_read(&pname, sizeof(pname), &dmabuf->name) ||
+ bpf_core_read(&exporter, sizeof(exporter), &dmabuf->exp_name))
+ return 1;
+
+ /* Buffers are not required to be named */
+ if (pname) {
+ if (bpf_probe_read_kernel(name, sizeof(name), pname))
+ return 1;
+
+ /* Name strings can be provided by userspace */
+ sanitize_string(name, sizeof(name));
+ }
+
+ BPF_SEQ_PRINTF(seq, "%lu\n%llu\n%s\n%s\n", inode, size, name, exporter);
+ return 0;
+}
+
+SEC("syscall")
+int iter_dmabuf_for_each(const void *ctx)
+{
+ struct dma_buf *d;
+
+ bpf_for_each(dmabuf, d) {
+ char name[DMA_BUF_NAME_LEN];
+ const char *pname;
+ bool *found;
+ long len;
+ int i;
+
+ if (bpf_core_read(&pname, sizeof(pname), &d->name))
+ return 1;
+
+ /* Buffers are not required to be named */
+ if (!pname)
+ continue;
+
+ len = bpf_probe_read_kernel_str(name, sizeof(name), pname);
+ if (len < 0)
+ return 1;
+
+ /*
+ * The entire name buffer is used as a map key.
+ * Zeroize any uninitialized trailing bytes after the NUL.
+ */
+ bpf_for(i, len, DMA_BUF_NAME_LEN)
+ name[i] = 0;
+
+ found = bpf_map_lookup_elem(&testbuf_hash, name);
+ if (found) {
+ bool t = true;
+
+ bpf_map_update_elem(&testbuf_hash, name, &t, BPF_EXIST);
+ }
+ }
+
+ return 0;
+}
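
For context, a BPF iterator like dmabuf_collector is normally consumed from user space by attaching it, creating an iterator fd and read()ing the seq_file output. A rough, hypothetical user-space sketch using libbpf (the object path, program name and error handling are illustrative only, not taken from the patch):

// Illustrative user-space consumer, not part of the patch.
#include <stdio.h>
#include <unistd.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;
	char buf[4096];
	int iter_fd;
	ssize_t n;

	obj = bpf_object__open_file("dmabuf_iter.bpf.o", NULL);
	if (!obj || bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "dmabuf_collector");
	if (!prog)
		return 1;

	link = bpf_program__attach_iter(prog, NULL);
	if (!link)
		return 1;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0)
		return 1;

	/* Each dmabuf is printed as four newline-delimited fields:
	 * inode, size, name, exporter.
	 */
	while ((n = read(iter_fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}

	close(iter_fd);
	bpf_link__destroy(link);
	bpf_object__close(obj);
	return 0;
}
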
diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c b/tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c
new file mode 100644
index 000000000000..0bf969a0b5ed
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops.s/test_2")
+__failure __msg("attach to unsupported member test_2 of struct bpf_dummy_ops")
+int BPF_PROG(test_unsupported_field_sleepable,
+ struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
+ char a3, unsigned long a4)
+{
+ /* Tries to mark an unsleepable field in struct bpf_dummy_ops as sleepable. */
+ return 0;
+}
+
+SEC(".struct_ops")
+struct bpf_dummy_ops dummy_1 = {
+ .test_1 = NULL,
+ .test_2 = (void *)test_unsupported_field_sleepable,
+ .test_sleepable = (void *)NULL,
+};
diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
new file mode 100644
index 000000000000..ec0c595d47af
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1, struct bpf_dummy_ops_state *state)
+{
+ int ret;
+
+ /* Check that the nullable status of 'state' is detected correctly.
+ * If the 'state' argument were assumed non-null by the verifier,
+ * the code below would be deleted as dead (which it shouldn't be).
+ * Hide it from the compiler behind an 'asm' block to avoid
+ * unnecessary optimizations.
+ */
+ asm volatile (
+ "if %[state] != 0 goto +2;"
+ "r0 = 0xf2f3f4f5;"
+ "exit;"
+ ::[state]"p"(state));
+
+ ret = state->val;
+ state->val = 0x5a;
+ return ret;
+}
+
+__u64 test_2_args[5];
+
+SEC("struct_ops/test_2")
+int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
+ char a3, unsigned long a4)
+{
+ test_2_args[0] = state->val;
+ test_2_args[1] = a1;
+ test_2_args[2] = a2;
+ test_2_args[3] = a3;
+ test_2_args[4] = a4;
+ return 0;
+}
+
+SEC("struct_ops.s/test_sleepable")
+int BPF_PROG(test_sleepable, struct bpf_dummy_ops_state *state)
+{
+ return 0;
+}
+
+SEC(".struct_ops")
+struct bpf_dummy_ops dummy_1 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2,
+ .test_sleepable = (void *)test_sleepable,
+};
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
new file mode 100644
index 000000000000..bd8f15229f5c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -0,0 +1,1737 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include <errno.h>
+#include <string.h>
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <linux/if_ether.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct test_info {
+ int x;
+ struct bpf_dynptr ptr;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct bpf_dynptr);
+} array_map1 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct test_info);
+} array_map2 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} array_map3 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} array_map4 SEC(".maps");
+
+struct sample {
+ int pid;
+ long value;
+ char comm[16];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096);
+} ringbuf SEC(".maps");
+
+int err, val;
+
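+/* Helper used throughout: store 'val' into array_map3 and initialize an
+ * (unreferenced) mem dynptr over the 4-byte map value. Returns 0 on success,
+ * -ENOENT if the lookup fails.
+ */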
+static int get_map_val_dynptr(struct bpf_dynptr *ptr)
+{
+ __u32 key = 0, *map_val;
+
+ bpf_map_update_elem(&array_map3, &key, &val, 0);
+
+ map_val = bpf_map_lookup_elem(&array_map3, &key);
+ if (!map_val)
+ return -ENOENT;
+
+ bpf_dynptr_from_mem(map_val, sizeof(*map_val), 0, ptr);
+
+ return 0;
+}
+
+/* Every bpf_ringbuf_reserve_dynptr call must have a corresponding
+ * bpf_ringbuf_submit/discard_dynptr call
+ */
+SEC("?raw_tp")
+__failure __msg("Unreleased reference id=2")
+int ringbuf_missing_release1(void *ctx)
+{
+ struct bpf_dynptr ptr = {};
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ /* missing a call to bpf_ringbuf_discard/submit_dynptr */
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("Unreleased reference id=4")
+int ringbuf_missing_release2(void *ctx)
+{
+ struct bpf_dynptr ptr1, ptr2;
+ struct sample *sample;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr1);
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr2);
+
+ sample = bpf_dynptr_data(&ptr1, 0, sizeof(*sample));
+ if (!sample) {
+ bpf_ringbuf_discard_dynptr(&ptr1, 0);
+ bpf_ringbuf_discard_dynptr(&ptr2, 0);
+ return 0;
+ }
+
+ bpf_ringbuf_submit_dynptr(&ptr1, 0);
+
+ /* missing a call to bpf_ringbuf_discard/submit_dynptr on ptr2 */
+
+ return 0;
+}
+
+static int missing_release_callback_fn(__u32 index, void *data)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ /* missing a call to bpf_ringbuf_discard/submit_dynptr */
+
+ return 0;
+}
+
+/* Any dynptr reserved within a callback must be released (submitted or discarded) there */
+SEC("?raw_tp")
+__failure __msg("Unreleased reference id")
+int ringbuf_missing_release_callback(void *ctx)
+{
+ bpf_loop(10, missing_release_callback_fn, NULL, 0);
+ return 0;
+}
+
+/* Can't call bpf_ringbuf_submit/discard_dynptr on a non-initialized dynptr */
+SEC("?raw_tp")
+__failure __msg("arg 1 is an unacquired reference")
+int ringbuf_release_uninit_dynptr(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* A dynptr can't be used after it has been invalidated */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #2")
+int use_after_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ char read_data[64];
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(read_data), 0, &ptr);
+
+ bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ return 0;
+}
+
+/* Can't call non-dynptr ringbuf APIs on a dynptr ringbuf sample */
+SEC("?raw_tp")
+__failure __msg("type=mem expected=ringbuf_mem")
+int ringbuf_invalid_api(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct sample *sample;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr);
+ sample = bpf_dynptr_data(&ptr, 0, sizeof(*sample));
+ if (!sample)
+ goto done;
+
+ sample->pid = 123;
+
+ /* invalid API use. need to use dynptr API to submit/discard */
+ bpf_ringbuf_submit(sample, 0);
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* Can't add a dynptr to a map */
+SEC("?raw_tp")
+__failure __msg("invalid read from stack")
+int add_dynptr_to_map1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ int key = 0;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ /* this should fail */
+ bpf_map_update_elem(&array_map1, &key, &ptr, 0);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* Can't add a struct with an embedded dynptr to a map */
+SEC("?raw_tp")
+__failure __msg("invalid read from stack")
+int add_dynptr_to_map2(void *ctx)
+{
+ struct test_info x;
+ int key = 0;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &x.ptr);
+
+ /* this should fail */
+ bpf_map_update_elem(&array_map2, &key, &x, 0);
+
+ bpf_ringbuf_submit_dynptr(&x.ptr, 0);
+
+ return 0;
+}
+
+/* A data slice can't be accessed out of bounds */
+SEC("?raw_tp")
+__failure __msg("value is outside of the allowed memory range")
+int data_slice_out_of_bounds_ringbuf(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ void *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
+
+ data = bpf_dynptr_data(&ptr, 0, 8);
+ if (!data)
+ goto done;
+
+ /* can't index out of bounds of the data slice */
+ val = *((char *)data + 8);
+
+done:
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* A data slice can't be accessed out of bounds */
+SEC("?tc")
+__failure __msg("value is outside of the allowed memory range")
+int data_slice_out_of_bounds_skb(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+ return SK_DROP;
+
+ /* this should fail */
+ *(__u8*)(hdr + 1) = 1;
+
+ return SK_PASS;
+}
+
+SEC("?raw_tp")
+__failure __msg("value is outside of the allowed memory range")
+int data_slice_out_of_bounds_map_value(void *ctx)
+{
+ __u32 map_val;
+ struct bpf_dynptr ptr;
+ void *data;
+
+ get_map_val_dynptr(&ptr);
+
+ data = bpf_dynptr_data(&ptr, 0, sizeof(map_val));
+ if (!data)
+ return 0;
+
+ /* can't index out of bounds of the data slice */
+ val = *((char *)data + (sizeof(map_val) + 1));
+
+ return 0;
+}
+
+/* A data slice can't be used after it has been released */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int data_slice_use_after_release1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct sample *sample;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr);
+ sample = bpf_dynptr_data(&ptr, 0, sizeof(*sample));
+ if (!sample)
+ goto done;
+
+ sample->pid = 123;
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ /* this should fail */
+ val = sample->pid;
+
+ return 0;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* A data slice can't be used after it has been released.
+ *
+ * This tests the case where the data slice tracks a dynptr (ptr2)
+ * that is at a non-zero offset from the frame pointer (ptr1 is at fp,
+ * ptr2 is at fp - 16).
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int data_slice_use_after_release2(void *ctx)
+{
+ struct bpf_dynptr ptr1, ptr2;
+ struct sample *sample;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr1);
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr2);
+
+ sample = bpf_dynptr_data(&ptr2, 0, sizeof(*sample));
+ if (!sample)
+ goto done;
+
+ sample->pid = 23;
+
+ bpf_ringbuf_submit_dynptr(&ptr2, 0);
+
+ /* this should fail */
+ sample->pid = 23;
+
+ bpf_ringbuf_submit_dynptr(&ptr1, 0);
+
+ return 0;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr2, 0);
+ bpf_ringbuf_discard_dynptr(&ptr1, 0);
+ return 0;
+}
+
+/* A data slice must first be checked for NULL */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'mem_or_null'")
+int data_slice_missing_null_check1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ void *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
+
+ data = bpf_dynptr_data(&ptr, 0, 8);
+
+ /* missing if (!data) check */
+
+ /* this should fail */
+ *(__u8 *)data = 3;
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* A data slice can't be dereferenced if it wasn't checked for null */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'mem_or_null'")
+int data_slice_missing_null_check2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u64 *data1, *data2;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);
+
+ data1 = bpf_dynptr_data(&ptr, 0, 8);
+ data2 = bpf_dynptr_data(&ptr, 0, 8);
+ if (data1)
+ /* this should fail */
+ *data2 = 3;
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* Can't pass in a dynptr as an arg to a helper function that doesn't take in a
+ * dynptr argument
+ */
+SEC("?raw_tp")
+__failure __msg("invalid read from stack")
+int invalid_helper1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ get_map_val_dynptr(&ptr);
+
+ /* this should fail */
+ bpf_strncmp((const char *)&ptr, sizeof(ptr), "hello!");
+
+ return 0;
+}
+
+/* A dynptr can't be passed into a helper function at a non-zero offset */
+SEC("?raw_tp")
+__failure __msg("cannot pass in dynptr at an offset=-8")
+int invalid_helper2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ char read_data[64];
+
+ get_map_val_dynptr(&ptr);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 8, 0, 0);
+ return 0;
+}
+
+/* A bpf_dynptr is invalidated if it's been written into */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #0")
+int invalid_write1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ void *data;
+ __u8 x = 0;
+
+ get_map_val_dynptr(&ptr);
+
+ memcpy(&ptr, &x, sizeof(x));
+
+ /* this should fail */
+ data = bpf_dynptr_data(&ptr, 0, 1);
+ __sink(data);
+
+ return 0;
+}
+
+/*
+ * A bpf_dynptr can't be used as a dynptr if it has been written into at a fixed
+ * offset
+ */
+SEC("?raw_tp")
+__failure __msg("cannot overwrite referenced dynptr")
+int invalid_write2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ char read_data[64];
+ __u8 x = 0;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+
+ memcpy((void *)&ptr + 8, &x, sizeof(x));
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/*
+ * A bpf_dynptr can't be used as a dynptr if it has been written into at a
+ * non-const offset
+ */
+SEC("?raw_tp")
+__failure __msg("cannot overwrite referenced dynptr")
+int invalid_write3(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ char stack_buf[16];
+ unsigned long len;
+ __u8 x = 0;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
+
+ memcpy(stack_buf, &val, sizeof(val));
+ len = stack_buf[0] & 0xf;
+
+ memcpy((void *)&ptr + len, &x, sizeof(x));
+
+ /* this should fail */
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+static int invalid_write4_callback(__u32 index, void *data)
+{
+ *(__u32 *)data = 123;
+
+ return 0;
+}
+
+/* If the dynptr is written into in a callback function, it should
+ * be invalidated as a dynptr
+ */
+SEC("?raw_tp")
+__failure __msg("cannot overwrite referenced dynptr")
+int invalid_write4(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+
+ bpf_loop(10, invalid_write4_callback, &ptr, 0);
+
+ /* this should fail */
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* A globally-defined bpf_dynptr can't be used (it must reside on the stack) */
+struct bpf_dynptr global_dynptr;
+
+SEC("?raw_tp")
+__failure __msg("type=map_value expected=fp")
+int global(void *ctx)
+{
+ /* this should fail */
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &global_dynptr);
+
+ bpf_ringbuf_discard_dynptr(&global_dynptr, 0);
+
+ return 0;
+}
+
+/* A direct read should fail */
+SEC("?raw_tp")
+__failure __msg("invalid read from stack")
+int invalid_read1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+
+ /* this should fail */
+ val = *(int *)&ptr;
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* A direct read at an offset should fail */
+SEC("?raw_tp")
+__failure __msg("cannot pass in dynptr at an offset")
+int invalid_read2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ char read_data[64];
+
+ get_map_val_dynptr(&ptr);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 1, 0, 0);
+
+ return 0;
+}
+
+/* A direct read at an offset into the lower stack slot should fail */
+SEC("?raw_tp")
+__failure __msg("invalid read from stack")
+int invalid_read3(void *ctx)
+{
+ struct bpf_dynptr ptr1, ptr2;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr1);
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr2);
+
+ /* this should fail */
+ memcpy(&val, (void *)&ptr1 + 8, sizeof(val));
+
+ bpf_ringbuf_discard_dynptr(&ptr1, 0);
+ bpf_ringbuf_discard_dynptr(&ptr2, 0);
+
+ return 0;
+}
+
+static int invalid_read4_callback(__u32 index, void *data)
+{
+ /* this should fail */
+ val = *(__u32 *)data;
+
+ return 0;
+}
+
+/* A direct read within a callback function should fail */
+SEC("?raw_tp")
+__failure __msg("invalid read from stack")
+int invalid_read4(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+
+ bpf_loop(10, invalid_read4_callback, &ptr, 0);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* Initializing a dynptr at an offset into its stack slot should fail */
+SEC("?raw_tp")
+__failure __msg("cannot pass in dynptr at an offset=0")
+int invalid_offset(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr + 1);
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* Can't release a dynptr twice */
+SEC("?raw_tp")
+__failure __msg("arg 1 is an unacquired reference")
+int release_twice(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ /* this second release should fail */
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+static int release_twice_callback_fn(__u32 index, void *data)
+{
+ /* this should fail */
+ bpf_ringbuf_discard_dynptr(data, 0);
+
+ return 0;
+}
+
+/* Test that releasing a dynptr twice, where one of the releases happens
+ * within a callback function, fails
+ */
+SEC("?raw_tp")
+__failure __msg("arg 1 is an unacquired reference")
+int release_twice_callback(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 32, 0, &ptr);
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ bpf_loop(10, release_twice_callback_fn, &ptr, 0);
+
+ return 0;
+}
+
+/* Reject unsupported local mem types for dynptr_from_mem API */
+SEC("?raw_tp")
+__failure __msg("Unsupported reg type fp for bpf_dynptr_from_mem data")
+int dynptr_from_mem_invalid_api(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ int x = 0;
+
+ /* this should fail */
+ bpf_dynptr_from_mem(&x, sizeof(x), 0, &ptr);
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
+int dynptr_pruning_overwrite(struct __sk_buff *ctx)
+{
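+	/* Reserve a ringbuf dynptr at fp-16; on one branch a scalar is
+	 * written over that slot before the discard. The verifier must
+	 * reject this overwrite of a referenced dynptr rather than prune
+	 * the branches as equivalent.
+	 */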
+ asm volatile (
+ "r9 = 0xeB9F; \
+ r6 = %[ringbuf] ll; \
+ r1 = r6; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -16; \
+ call %[bpf_ringbuf_reserve_dynptr]; \
+ if r0 == 0 goto pjmp1; \
+ goto pjmp2; \
+ pjmp1: \
+ *(u64 *)(r10 - 16) = r9; \
+ pjmp2: \
+ r1 = r10; \
+ r1 += -16; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard_dynptr]; "
+ :
+ : __imm(bpf_ringbuf_reserve_dynptr),
+ __imm(bpf_ringbuf_discard_dynptr),
+ __imm_addr(ringbuf)
+ : __clobber_all
+ );
+ return 0;
+}
+
+SEC("?tc")
+__success __msg("12: safe") __log_level(2)
+int dynptr_pruning_stacksafe(struct __sk_buff *ctx)
+{
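+	/* Same reservation as above, but neither branch touches the dynptr
+	 * slot, so the verifier's state pruning is expected to find the
+	 * converged state equivalent (the "12: safe" message in the log).
+	 */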
+ asm volatile (
+ "r9 = 0xeB9F; \
+ r6 = %[ringbuf] ll; \
+ r1 = r6; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -16; \
+ call %[bpf_ringbuf_reserve_dynptr]; \
+ if r0 == 0 goto stjmp1; \
+ goto stjmp2; \
+ stjmp1: \
+ r9 = r9; \
+ stjmp2: \
+ r1 = r10; \
+ r1 += -16; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard_dynptr]; "
+ :
+ : __imm(bpf_ringbuf_reserve_dynptr),
+ __imm(bpf_ringbuf_discard_dynptr),
+ __imm_addr(ringbuf)
+ : __clobber_all
+ );
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
+int dynptr_pruning_type_confusion(struct __sk_buff *ctx)
+{
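+	/* Exercises state pruning when the slot at fp-16 holds a referenced
+	 * ringbuf dynptr on one path and is re-initialized with
+	 * bpf_dynptr_from_mem on another. The verifier must not conflate the
+	 * two and must reject overwriting the referenced dynptr.
+	 */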
+ asm volatile (
+ "r6 = %[array_map4] ll; \
+ r7 = %[ringbuf] ll; \
+ r1 = r6; \
+ r2 = r10; \
+ r2 += -8; \
+ r9 = 0; \
+ *(u64 *)(r2 + 0) = r9; \
+ r3 = r10; \
+ r3 += -24; \
+ r9 = 0xeB9FeB9F; \
+ *(u64 *)(r10 - 16) = r9; \
+ *(u64 *)(r10 - 24) = r9; \
+ r9 = 0; \
+ r4 = 0; \
+ r8 = r2; \
+ call %[bpf_map_update_elem]; \
+ r1 = r6; \
+ r2 = r8; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto tjmp1; \
+ exit; \
+ tjmp1: \
+ r8 = r0; \
+ r1 = r7; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -16; \
+ r0 = *(u64 *)(r0 + 0); \
+ call %[bpf_ringbuf_reserve_dynptr]; \
+ if r0 == 0 goto tjmp2; \
+ r8 = r8; \
+ r8 = r8; \
+ r8 = r8; \
+ r8 = r8; \
+ r8 = r8; \
+ r8 = r8; \
+ r8 = r8; \
+ goto tjmp3; \
+ tjmp2: \
+ *(u64 *)(r10 - 8) = r9; \
+ *(u64 *)(r10 - 16) = r9; \
+ r1 = r8; \
+ r1 += 8; \
+ r2 = 0; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -16; \
+ call %[bpf_dynptr_from_mem]; \
+ tjmp3: \
+ r1 = r10; \
+ r1 += -16; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard_dynptr]; "
+ :
+ : __imm(bpf_map_update_elem),
+ __imm(bpf_map_lookup_elem),
+ __imm(bpf_ringbuf_reserve_dynptr),
+ __imm(bpf_dynptr_from_mem),
+ __imm(bpf_ringbuf_discard_dynptr),
+ __imm_addr(array_map4),
+ __imm_addr(ringbuf)
+ : __clobber_all
+ );
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("dynptr has to be at a constant offset") __log_level(2)
+int dynptr_var_off_overwrite(struct __sk_buff *ctx)
+{
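+	/* The dynptr is placed at fp-32 plus a bounded but variable offset
+	 * r8. Dynptrs must live at a constant stack offset, so the verifier
+	 * is expected to reject this.
+	 */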
+ asm volatile (
+ "r9 = 16; \
+ *(u32 *)(r10 - 4) = r9; \
+ r8 = *(u32 *)(r10 - 4); \
+ if r8 >= 0 goto vjmp1; \
+ r0 = 1; \
+ exit; \
+ vjmp1: \
+ if r8 <= 16 goto vjmp2; \
+ r0 = 1; \
+ exit; \
+ vjmp2: \
+ r8 &= 16; \
+ r1 = %[ringbuf] ll; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -32; \
+ r4 += r8; \
+ call %[bpf_ringbuf_reserve_dynptr]; \
+ r9 = 0xeB9F; \
+ *(u64 *)(r10 - 16) = r9; \
+ r1 = r10; \
+ r1 += -32; \
+ r1 += r8; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard_dynptr]; "
+ :
+ : __imm(bpf_ringbuf_reserve_dynptr),
+ __imm(bpf_ringbuf_discard_dynptr),
+ __imm_addr(ringbuf)
+ : __clobber_all
+ );
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
+int dynptr_partial_slot_invalidate(struct __sk_buff *ctx)
+{
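+	/* A referenced ringbuf dynptr is reserved at fp-24, spanning two
+	 * 8-byte slots. The program then stores to fp-16 and initializes a
+	 * mem dynptr there, partially clobbering the referenced dynptr,
+	 * which the verifier must reject.
+	 */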
+ asm volatile (
+ "r6 = %[ringbuf] ll; \
+ r7 = %[array_map4] ll; \
+ r1 = r7; \
+ r2 = r10; \
+ r2 += -8; \
+ r9 = 0; \
+ *(u64 *)(r2 + 0) = r9; \
+ r3 = r2; \
+ r4 = 0; \
+ r8 = r2; \
+ call %[bpf_map_update_elem]; \
+ r1 = r7; \
+ r2 = r8; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto sjmp1; \
+ exit; \
+ sjmp1: \
+ r7 = r0; \
+ r1 = r6; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -24; \
+ call %[bpf_ringbuf_reserve_dynptr]; \
+ *(u64 *)(r10 - 16) = r9; \
+ r1 = r7; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -16; \
+ call %[bpf_dynptr_from_mem]; \
+ r1 = r10; \
+ r1 += -512; \
+ r2 = 488; \
+ r3 = r10; \
+ r3 += -24; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_dynptr_read]; \
+ r8 = 1; \
+ if r0 != 0 goto sjmp2; \
+ r8 = 0; \
+ sjmp2: \
+ r1 = r10; \
+ r1 += -24; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard_dynptr]; "
+ :
+ : __imm(bpf_map_update_elem),
+ __imm(bpf_map_lookup_elem),
+ __imm(bpf_ringbuf_reserve_dynptr),
+ __imm(bpf_ringbuf_discard_dynptr),
+ __imm(bpf_dynptr_from_mem),
+ __imm(bpf_dynptr_read),
+ __imm_addr(ringbuf),
+ __imm_addr(array_map4)
+ : __clobber_all
+ );
+ return 0;
+}
+
+/* Test that it is allowed to overwrite an unreferenced dynptr. */
+SEC("?raw_tp")
+__success
+int dynptr_overwrite_unref(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+
+ return 0;
+}
+
+/* Test that slices are invalidated on reinitializing a dynptr. */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int dynptr_invalidate_slice_reinit(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u8 *p;
+
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+ p = bpf_dynptr_data(&ptr, 0, 1);
+ if (!p)
+ return 0;
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+ /* this should fail */
+ return *p;
+}
+
+/* Invalidation of dynptr slices on destruction of dynptr should not miss
+ * mem_or_null pointers.
+ */
+SEC("?raw_tp")
+__failure __msg("R{{[0-9]+}} type=scalar expected=percpu_ptr_")
+int dynptr_invalidate_slice_or_null(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u8 *p;
+
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+
+ p = bpf_dynptr_data(&ptr, 0, 1);
+ *(__u8 *)&ptr = 0;
+ /* this should fail */
+ bpf_this_cpu_ptr(p);
+ return 0;
+}
+
+/* Destruction of a dynptr should also invalidate any slices obtained from it */
+SEC("?raw_tp")
+__failure __msg("R{{[0-9]+}} invalid mem access 'scalar'")
+int dynptr_invalidate_slice_failure(void *ctx)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ __u8 *p1, *p2;
+
+ if (get_map_val_dynptr(&ptr1))
+ return 0;
+ if (get_map_val_dynptr(&ptr2))
+ return 0;
+
+ p1 = bpf_dynptr_data(&ptr1, 0, 1);
+ if (!p1)
+ return 0;
+ p2 = bpf_dynptr_data(&ptr2, 0, 1);
+ if (!p2)
+ return 0;
+
+ *(__u8 *)&ptr1 = 0;
+ /* this should fail */
+ return *p1;
+}
+
+/* Invalidation of slices should be scoped and should not prevent dereferencing
+ * slices of another dynptr after destroying unrelated dynptr
+ */
+SEC("?raw_tp")
+__success
+int dynptr_invalidate_slice_success(void *ctx)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ __u8 *p1, *p2;
+
+ if (get_map_val_dynptr(&ptr1))
+ return 1;
+ if (get_map_val_dynptr(&ptr2))
+ return 1;
+
+ p1 = bpf_dynptr_data(&ptr1, 0, 1);
+ if (!p1)
+ return 1;
+ p2 = bpf_dynptr_data(&ptr2, 0, 1);
+ if (!p2)
+ return 1;
+
+ *(__u8 *)&ptr1 = 0;
+ return *p2;
+}
+
+/* Overwriting referenced dynptr should be rejected */
+SEC("?raw_tp")
+__failure __msg("cannot overwrite referenced dynptr")
+int dynptr_overwrite_ref(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+ /* this should fail */
+ if (get_map_val_dynptr(&ptr))
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* Reject writes to dynptr slot from bpf_dynptr_read */
+SEC("?raw_tp")
+__failure __msg("potential write to dynptr at off=-16")
+int dynptr_read_into_slot(void *ctx)
+{
+ union {
+ struct {
+ char _pad[48];
+ struct bpf_dynptr ptr;
+ };
+ char buf[64];
+ } data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &data.ptr);
+ /* this should fail */
+ bpf_dynptr_read(data.buf, sizeof(data.buf), &data.ptr, 0, 0);
+
+ return 0;
+}
+
+/* bpf_dynptr_slice()s are read-only and cannot be written to */
+SEC("?tc")
+__failure __msg("R{{[0-9]+}} cannot write into rdonly_mem")
+int skb_invalid_slice_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+ return SK_DROP;
+
+ /* this should fail */
+ hdr->h_proto = 1;
+
+ return SK_PASS;
+}
+
+/* The read-only data slice is invalidated whenever a helper changes packet data */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int skb_invalid_data_slice1(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+ return SK_DROP;
+
+ val = hdr->h_proto;
+
+ if (bpf_skb_pull_data(skb, skb->len))
+ return SK_DROP;
+
+ /* this should fail */
+ val = hdr->h_proto;
+
+ return SK_PASS;
+}
+
+/* The read-write data slice is invalidated whenever a helper changes packet data */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int skb_invalid_data_slice2(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+ return SK_DROP;
+
+ hdr->h_proto = 123;
+
+ if (bpf_skb_pull_data(skb, skb->len))
+ return SK_DROP;
+
+ /* this should fail */
+ hdr->h_proto = 1;
+
+ return SK_PASS;
+}
+
+/* The read-only data slice is invalidated whenever bpf_dynptr_write() is called */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int skb_invalid_data_slice3(struct __sk_buff *skb)
+{
+ char write_data[64] = "hello there, world!!";
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+ return SK_DROP;
+
+ val = hdr->h_proto;
+
+ bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
+
+ /* this should fail */
+ val = hdr->h_proto;
+
+ return SK_PASS;
+}
+
+/* The read-write data slice is invalidated whenever bpf_dynptr_write() is called */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int skb_invalid_data_slice4(struct __sk_buff *skb)
+{
+ char write_data[64] = "hello there, world!!";
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+ hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+ return SK_DROP;
+
+ hdr->h_proto = 123;
+
+ bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
+
+ /* this should fail */
+ hdr->h_proto = 1;
+
+ return SK_PASS;
+}
+
+/* The read-only data slice is invalidated whenever a helper changes packet data */
+SEC("?xdp")
+__failure __msg("invalid mem access 'scalar'")
+int xdp_invalid_data_slice1(struct xdp_md *xdp)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_xdp(xdp, 0, &ptr);
+ hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+ return SK_DROP;
+
+ val = hdr->h_proto;
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
+ return XDP_DROP;
+
+ /* this should fail */
+ val = hdr->h_proto;
+
+ return XDP_PASS;
+}
+
+/* The read-write data slice is invalidated whenever a helper changes packet data */
+SEC("?xdp")
+__failure __msg("invalid mem access 'scalar'")
+int xdp_invalid_data_slice2(struct xdp_md *xdp)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_xdp(xdp, 0, &ptr);
+ hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+ return SK_DROP;
+
+ hdr->h_proto = 9;
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
+ return XDP_DROP;
+
+ /* this should fail */
+ hdr->h_proto = 1;
+
+ return XDP_PASS;
+}
+
+/* Only supported prog types can create skb-type dynptrs */
+SEC("?raw_tp")
+__failure __msg("calling kernel function bpf_dynptr_from_skb is not allowed")
+int skb_invalid_ctx(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_from_skb(ctx, 0, &ptr);
+
+ return 0;
+}
+
+SEC("fentry/skb_tx_error")
+__failure __msg("must be referenced or trusted")
+int BPF_PROG(skb_invalid_ctx_fentry, void *skb)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ return 0;
+}
+
+SEC("fexit/skb_tx_error")
+__failure __msg("must be referenced or trusted")
+int BPF_PROG(skb_invalid_ctx_fexit, void *skb)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ return 0;
+}
+
+/* Reject writes to dynptr slot for uninit arg */
+SEC("?raw_tp")
+__failure __msg("potential write to dynptr at off=-16")
+int uninit_write_into_slot(void *ctx)
+{
+ struct {
+ char buf[64];
+ struct bpf_dynptr ptr;
+ } data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 80, 0, &data.ptr);
+ /* this should fail */
+ bpf_get_current_comm(data.buf, 80);
+
+ return 0;
+}
+
+/* Only supported prog types can create xdp-type dynptrs */
+SEC("?raw_tp")
+__failure __msg("calling kernel function bpf_dynptr_from_xdp is not allowed")
+int xdp_invalid_ctx(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_from_xdp(ctx, 0, &ptr);
+
+ return 0;
+}
+
+__u32 hdr_size = sizeof(struct ethhdr);
+/* Can't pass in variable-sized len to bpf_dynptr_slice */
+SEC("?tc")
+__failure __msg("unbounded memory access")
+int dynptr_slice_var_len1(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ /* this should fail */
+ hdr = bpf_dynptr_slice(&ptr, 0, buffer, hdr_size);
+ if (!hdr)
+ return SK_DROP;
+
+ return SK_PASS;
+}
+
+/* Can't pass in variable-sized len to bpf_dynptr_slice */
+SEC("?tc")
+__failure __msg("must be a known constant")
+int dynptr_slice_var_len2(struct __sk_buff *skb)
+{
+ char buffer[sizeof(struct ethhdr)] = {};
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ if (hdr_size <= sizeof(buffer)) {
+ /* this should fail */
+ hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, hdr_size);
+ if (!hdr)
+ return SK_DROP;
+ hdr->h_proto = 12;
+ }
+
+ return SK_PASS;
+}
+
+static int callback(__u32 index, void *data)
+{
+ *(__u32 *)data = 123;
+
+ return 0;
+}
+
+/* If the dynptr is written into in a callback function, its data
+ * slices should be invalidated as well.
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int invalid_data_slices(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u32 *slice;
+
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+
+ slice = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
+ if (!slice)
+ return 0;
+
+ bpf_loop(10, callback, &ptr, 0);
+
+ /* this should fail */
+ *slice = 1;
+
+ return 0;
+}
+
+/* Program types that don't allow writes to packet data should fail if
+ * bpf_dynptr_slice_rdwr is called
+ */
+SEC("cgroup_skb/ingress")
+__failure __msg("the prog does not allow writes to packet data")
+int invalid_slice_rdwr_rdonly(struct __sk_buff *skb)
+{
+ char buffer[sizeof(struct ethhdr)] = {};
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ /* this should fail since cgroup_skb doesn't allow
+ * changing packet data
+ */
+ hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
+ __sink(hdr);
+
+ return 0;
+}
+
+/* bpf_dynptr_adjust can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #0")
+int dynptr_adjust_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr = {};
+
+ /* this should fail */
+ bpf_dynptr_adjust(&ptr, 1, 2);
+
+ return 0;
+}
+
+/* bpf_dynptr_is_null can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #0")
+int dynptr_is_null_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr = {};
+
+ /* this should fail */
+ bpf_dynptr_is_null(&ptr);
+
+ return 0;
+}
+
+/* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #0")
+int dynptr_is_rdonly_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr = {};
+
+ /* this should fail */
+ bpf_dynptr_is_rdonly(&ptr);
+
+ return 0;
+}
+
+/* bpf_dynptr_size can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #0")
+int dynptr_size_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr = {};
+
+ /* this should fail */
+ bpf_dynptr_size(&ptr);
+
+ return 0;
+}
+
+/* Only initialized dynptrs can be cloned */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #0")
+int clone_invalid1(void *ctx)
+{
+ struct bpf_dynptr ptr1 = {};
+ struct bpf_dynptr ptr2;
+
+ /* this should fail */
+ bpf_dynptr_clone(&ptr1, &ptr2);
+
+ return 0;
+}
+
+/* Can't overwrite an existing dynptr when cloning */
+SEC("?xdp")
+__failure __msg("cannot overwrite referenced dynptr")
+int clone_invalid2(struct xdp_md *xdp)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr clone;
+
+ bpf_dynptr_from_xdp(xdp, 0, &ptr1);
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &clone);
+
+ /* this should fail */
+ bpf_dynptr_clone(&ptr1, &clone);
+
+ bpf_ringbuf_submit_dynptr(&clone, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate its clones */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #2")
+int clone_invalidate1(void *ctx)
+{
+ struct bpf_dynptr clone;
+ struct bpf_dynptr ptr;
+ char read_data[64];
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &clone, 0, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate its parent */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #2")
+int clone_invalidate2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone;
+ char read_data[64];
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ bpf_ringbuf_submit_dynptr(&clone, 0);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate its siblings */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #2")
+int clone_invalidate3(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone1;
+ struct bpf_dynptr clone2;
+ char read_data[64];
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone1);
+
+ bpf_dynptr_clone(&ptr, &clone2);
+
+ bpf_ringbuf_submit_dynptr(&clone2, 0);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &clone1, 0, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate any data slices
+ * of its clones
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_invalidate4(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone;
+ int *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+ data = bpf_dynptr_data(&clone, 0, sizeof(val));
+ if (!data)
+ return 0;
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate any data slices
+ * of its parent
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_invalidate5(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone;
+ int *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+ data = bpf_dynptr_data(&ptr, 0, sizeof(val));
+ if (!data)
+ return 0;
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ bpf_ringbuf_submit_dynptr(&clone, 0);
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate any data slices
+ * of its sibling
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_invalidate6(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone1;
+ struct bpf_dynptr clone2;
+ int *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone1);
+
+ bpf_dynptr_clone(&ptr, &clone2);
+
+ data = bpf_dynptr_data(&clone1, 0, sizeof(val));
+ if (!data)
+ return 0;
+
+ bpf_ringbuf_submit_dynptr(&clone2, 0);
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* An skb clone's data slices should be invalidated anytime packet data changes */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int clone_skb_packet_data(struct __sk_buff *skb)
+{
+ char buffer[sizeof(__u32)] = {};
+ struct bpf_dynptr clone;
+ struct bpf_dynptr ptr;
+ __u32 *data;
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+ data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
+ if (!data)
+ return XDP_DROP;
+
+ if (bpf_skb_pull_data(skb, skb->len))
+ return SK_DROP;
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* An xdp clone's data slices should be invalidated anytime packet data changes */
+SEC("?xdp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_xdp_packet_data(struct xdp_md *xdp)
+{
+ char buffer[sizeof(__u32)] = {};
+ struct bpf_dynptr clone;
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ __u32 *data;
+
+ bpf_dynptr_from_xdp(xdp, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+ data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
+ if (!data)
+ return XDP_DROP;
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
+ return XDP_DROP;
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* Buffers that are provided must be sufficiently long */
+SEC("?cgroup_skb/egress")
+__failure __msg("memory, len pair leads to invalid memory access")
+int test_dynptr_skb_small_buff(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ char buffer[8] = {};
+ __u64 *data;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+	/* this should fail: the provided 8-byte buffer is smaller than the 9-byte slice */
+ data = bpf_dynptr_slice(&ptr, 0, buffer, 9);
+
+ return !!data;
+}
+
+__noinline long global_call_bpf_dynptr(const struct bpf_dynptr *dynptr)
+{
+ long ret = 0;
+	/* Keep this global function non-empty so the compiler does not
+	 * optimize away the call to it.
+	 */
+ __sink(ret);
+ return ret;
+}
+
+SEC("?raw_tp")
+__failure __msg("arg#0 expected pointer to stack or const struct bpf_dynptr")
+int test_dynptr_reg_type(void *ctx)
+{
+ struct task_struct *current = NULL;
+	/* R1 should be holding a PTR_TO_BTF_ID, which is not a reg->type
+	 * that can be passed to a function accepting an argument of type
+	 * ARG_PTR_TO_DYNPTR | MEM_RDONLY. process_dynptr_func() should catch
+	 * this.
+	 */
+ global_call_bpf_dynptr((const struct bpf_dynptr *)current);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c
new file mode 100644
index 000000000000..8315273cb900
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/dynptr_success.c
@@ -0,0 +1,1082 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include <vmlinux.h>
+#include <string.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "errno.h"
+
+#define PAGE_SIZE_64K 65536
+
+char _license[] SEC("license") = "GPL";
+
+int pid, err, val;
+
+struct ringbuf_sample {
+ int pid;
+ int seq;
+ long value;
+ char comm[16];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096);
+} ringbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} array_map SEC(".maps");
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_read_write(void *ctx)
+{
+ char write_data[64] = "hello there, world!!";
+ char read_data[64] = {};
+ struct bpf_dynptr ptr;
+ int i;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(write_data), 0, &ptr);
+
+ /* Write data into the dynptr */
+ err = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
+
+ /* Read the data that was written into the dynptr */
+ err = err ?: bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ /* Ensure the data we read matches the data we wrote */
+ for (i = 0; i < sizeof(read_data); i++) {
+ if (read_data[i] != write_data[i]) {
+ err = 1;
+ break;
+ }
+ }
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_data(void *ctx)
+{
+ __u32 key = 0, val = 235, *map_val;
+ struct bpf_dynptr ptr;
+ __u32 map_val_size;
+ void *data;
+
+ map_val_size = sizeof(*map_val);
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ bpf_map_update_elem(&array_map, &key, &val, 0);
+
+ map_val = bpf_map_lookup_elem(&array_map, &key);
+ if (!map_val) {
+ err = 1;
+ return 0;
+ }
+
+ bpf_dynptr_from_mem(map_val, map_val_size, 0, &ptr);
+
+ /* Try getting a data slice that is out of range */
+ data = bpf_dynptr_data(&ptr, map_val_size + 1, 1);
+ if (data) {
+ err = 2;
+ return 0;
+ }
+
+ /* Try getting more bytes than available */
+ data = bpf_dynptr_data(&ptr, 0, map_val_size + 1);
+ if (data) {
+ err = 3;
+ return 0;
+ }
+
+ data = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
+ if (!data) {
+ err = 4;
+ return 0;
+ }
+
+ *(__u32 *)data = 999;
+
+ err = bpf_probe_read_kernel(&val, sizeof(val), data);
+ if (err)
+ return 0;
+
+ if (val != *(int *)data)
+ err = 5;
+
+ return 0;
+}
+
+static int ringbuf_callback(__u32 index, void *data)
+{
+ struct ringbuf_sample *sample;
+
+ struct bpf_dynptr *ptr = (struct bpf_dynptr *)data;
+
+ sample = bpf_dynptr_data(ptr, 0, sizeof(*sample));
+ if (!sample)
+ err = 2;
+ else
+ sample->pid += index;
+
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_ringbuf(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct ringbuf_sample *sample;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ val = 100;
+
+	/* Check that a dynamically-sized reservation can be made */
+ err = bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ sample = err ? NULL : bpf_dynptr_data(&ptr, 0, sizeof(*sample));
+ if (!sample) {
+ err = 1;
+ goto done;
+ }
+
+ sample->pid = 10;
+
+ /* Can pass dynptr to callback functions */
+ bpf_loop(10, ringbuf_callback, &ptr, 0);
+
+ if (sample->pid != 55)
+ err = 2;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("?cgroup_skb/egress")
+int test_skb_readonly(struct __sk_buff *skb)
+{
+ __u8 write_data[2] = {1, 2};
+ struct bpf_dynptr ptr;
+ int ret;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+ /* since cgroup skbs are read only, writes should fail */
+ ret = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
+ if (ret != -EINVAL) {
+ err = 2;
+ return 1;
+ }
+
+ return 1;
+}
+
+SEC("?cgroup_skb/egress")
+int test_dynptr_skb_data(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ __u64 *data;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+ /* This should return NULL. Must use bpf_dynptr_slice API */
+ data = bpf_dynptr_data(&ptr, 0, 1);
+ if (data) {
+ err = 2;
+ return 1;
+ }
+
+ return 1;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_adjust(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u32 bytes = 64;
+ __u32 off = 10;
+ __u32 trim = 15;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ err = bpf_ringbuf_reserve_dynptr(&ringbuf, bytes, 0, &ptr);
+ if (err) {
+ err = 1;
+ goto done;
+ }
+
+ if (bpf_dynptr_size(&ptr) != bytes) {
+ err = 2;
+ goto done;
+ }
+
+ /* Advance the dynptr by off */
+ err = bpf_dynptr_adjust(&ptr, off, bpf_dynptr_size(&ptr));
+ if (err) {
+ err = 3;
+ goto done;
+ }
+
+ if (bpf_dynptr_size(&ptr) != bytes - off) {
+ err = 4;
+ goto done;
+ }
+
+ /* Trim the dynptr */
+ err = bpf_dynptr_adjust(&ptr, off, 15);
+ if (err) {
+ err = 5;
+ goto done;
+ }
+
+ /* Check that the size was adjusted correctly */
+ if (bpf_dynptr_size(&ptr) != trim - off) {
+ err = 6;
+ goto done;
+ }
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_adjust_err(void *ctx)
+{
+ char write_data[45] = "hello there, world!!";
+ struct bpf_dynptr ptr;
+ __u32 size = 64;
+ __u32 off = 20;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
+ err = 1;
+ goto done;
+ }
+
+ /* Check that start can't be greater than end */
+ if (bpf_dynptr_adjust(&ptr, 5, 1) != -EINVAL) {
+ err = 2;
+ goto done;
+ }
+
+ /* Check that start can't be greater than size */
+ if (bpf_dynptr_adjust(&ptr, size + 1, size + 1) != -ERANGE) {
+ err = 3;
+ goto done;
+ }
+
+ /* Check that end can't be greater than size */
+ if (bpf_dynptr_adjust(&ptr, 0, size + 1) != -ERANGE) {
+ err = 4;
+ goto done;
+ }
+
+ if (bpf_dynptr_adjust(&ptr, off, size)) {
+ err = 5;
+ goto done;
+ }
+
+ /* Check that you can't write more bytes than available into the dynptr
+ * after you've adjusted it
+ */
+ if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
+ err = 6;
+ goto done;
+ }
+
+ /* Check that even after adjusting, submitting/discarding
+ * a ringbuf dynptr works
+ */
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+ return 0;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_zero_size_dynptr(void *ctx)
+{
+ char write_data = 'x', read_data;
+ struct bpf_dynptr ptr;
+ __u32 size = 64;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
+ err = 1;
+ goto done;
+ }
+
+ /* After this, the dynptr has a size of 0 */
+ if (bpf_dynptr_adjust(&ptr, size, size)) {
+ err = 2;
+ goto done;
+ }
+
+ /* Test that reading + writing non-zero bytes is not ok */
+ if (bpf_dynptr_read(&read_data, sizeof(read_data), &ptr, 0, 0) != -E2BIG) {
+ err = 3;
+ goto done;
+ }
+
+ if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
+ err = 4;
+ goto done;
+ }
+
+ /* Test that reading + writing 0 bytes from a 0-size dynptr is ok */
+ if (bpf_dynptr_read(&read_data, 0, &ptr, 0, 0)) {
+ err = 5;
+ goto done;
+ }
+
+ if (bpf_dynptr_write(&ptr, 0, &write_data, 0, 0)) {
+ err = 6;
+ goto done;
+ }
+
+ err = 0;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_is_null(void *ctx)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ __u64 size = 4;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ /* Pass in invalid flags, get back an invalid dynptr */
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 123, &ptr1) != -EINVAL) {
+ err = 1;
+ goto exit_early;
+ }
+
+ /* Test that the invalid dynptr is null */
+ if (!bpf_dynptr_is_null(&ptr1)) {
+ err = 2;
+ goto exit_early;
+ }
+
+ /* Get a valid dynptr */
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr2)) {
+ err = 3;
+ goto exit;
+ }
+
+ /* Test that the valid dynptr is not null */
+ if (bpf_dynptr_is_null(&ptr2)) {
+ err = 4;
+ goto exit;
+ }
+
+exit:
+ bpf_ringbuf_discard_dynptr(&ptr2, 0);
+exit_early:
+ bpf_ringbuf_discard_dynptr(&ptr1, 0);
+ return 0;
+}
+
+SEC("cgroup_skb/egress")
+int test_dynptr_is_rdonly(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ struct bpf_dynptr ptr3;
+
+ /* Pass in invalid flags, get back an invalid dynptr */
+ if (bpf_dynptr_from_skb(skb, 123, &ptr1) != -EINVAL) {
+ err = 1;
+ return 0;
+ }
+
+	/* Test that bpf_dynptr_is_rdonly() returns false for an invalid dynptr */
+ if (bpf_dynptr_is_rdonly(&ptr1)) {
+ err = 2;
+ return 0;
+ }
+
+ /* Get a read-only dynptr */
+ if (bpf_dynptr_from_skb(skb, 0, &ptr2)) {
+ err = 3;
+ return 0;
+ }
+
+ /* Test that the dynptr is read-only */
+ if (!bpf_dynptr_is_rdonly(&ptr2)) {
+ err = 4;
+ return 0;
+ }
+
+ /* Get a read-writeable dynptr */
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr3)) {
+ err = 5;
+ goto done;
+ }
+
+	/* Test that the dynptr is not read-only */
+ if (bpf_dynptr_is_rdonly(&ptr3)) {
+ err = 6;
+ goto done;
+ }
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr3, 0);
+ return 0;
+}
+
+SEC("cgroup_skb/egress")
+int test_dynptr_clone(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ __u32 off = 2, size;
+
+ /* Get a dynptr */
+ if (bpf_dynptr_from_skb(skb, 0, &ptr1)) {
+ err = 1;
+ return 0;
+ }
+
+ if (bpf_dynptr_adjust(&ptr1, off, bpf_dynptr_size(&ptr1))) {
+ err = 2;
+ return 0;
+ }
+
+ /* Clone the dynptr */
+ if (bpf_dynptr_clone(&ptr1, &ptr2)) {
+ err = 3;
+ return 0;
+ }
+
+ size = bpf_dynptr_size(&ptr1);
+
+	/* Check that the clone has the same size and read-only status */
+ if (bpf_dynptr_size(&ptr2) != size) {
+ err = 4;
+ return 0;
+ }
+
+ if (bpf_dynptr_is_rdonly(&ptr2) != bpf_dynptr_is_rdonly(&ptr1)) {
+ err = 5;
+ return 0;
+ }
+
+ /* Advance and trim the original dynptr */
+ bpf_dynptr_adjust(&ptr1, 5, 5);
+
+ /* Check that only original dynptr was affected, and the clone wasn't */
+ if (bpf_dynptr_size(&ptr2) != size) {
+ err = 6;
+ return 0;
+ }
+
+ return 0;
+}
+
+SEC("?cgroup_skb/egress")
+int test_dynptr_skb_no_buff(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ __u64 *data;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+ /* This may return NULL. SKB may require a buffer */
+ data = bpf_dynptr_slice(&ptr, 0, NULL, 1);
+
+ return !!data;
+}
+
+SEC("?cgroup_skb/egress")
+int test_dynptr_skb_strcmp(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ char *data;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+ /* This may return NULL. SKB may require a buffer */
+ data = bpf_dynptr_slice(&ptr, 0, NULL, 10);
+ if (data) {
+ bpf_strncmp(data, 10, "foo");
+ return 1;
+ }
+
+ return 1;
+}
+
+SEC("tp_btf/kfree_skb")
+int BPF_PROG(test_dynptr_skb_tp_btf, void *skb, void *location)
+{
+ __u8 write_data[2] = {1, 2};
+ struct bpf_dynptr ptr;
+ int ret;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+ /* since tp_btf skbs are read only, writes should fail */
+ ret = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
+ if (ret != -EINVAL) {
+ err = 2;
+ return 1;
+ }
+
+ return 1;
+}
+
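+/* Byte-wise comparison helper; BPF programs cannot call the libc memcmp(),
+ * so the tests below use this bounded-loop equivalent.
+ */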
+static inline int bpf_memcmp(const char *a, const char *b, u32 size)
+{
+ int i;
+
+ bpf_for(i, 0, size) {
+ if (a[i] != b[i])
+ return a[i] < b[i] ? -1 : 1;
+ }
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_copy(void *ctx)
+{
+ char data[] = "hello there, world!!";
+ char buf[32] = {'\0'};
+ __u32 sz = sizeof(data);
+ struct bpf_dynptr src, dst;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sz, 0, &src);
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sz, 0, &dst);
+
+ /* Test basic case of copying contiguous memory backed dynptrs */
+ err = bpf_dynptr_write(&src, 0, data, sz, 0);
+ err = err ?: bpf_dynptr_copy(&dst, 0, &src, 0, sz);
+ err = err ?: bpf_dynptr_read(buf, sz, &dst, 0, 0);
+ err = err ?: bpf_memcmp(data, buf, sz);
+
+ /* Test that offsets are handled correctly */
+ err = err ?: bpf_dynptr_copy(&dst, 3, &src, 5, sz - 5);
+ err = err ?: bpf_dynptr_read(buf, sz - 5, &dst, 3, 0);
+ err = err ?: bpf_memcmp(data + 5, buf, sz - 5);
+
+ bpf_ringbuf_discard_dynptr(&src, 0);
+ bpf_ringbuf_discard_dynptr(&dst, 0);
+ return 0;
+}
+
+SEC("xdp")
+int test_dynptr_copy_xdp(struct xdp_md *xdp)
+{
+ struct bpf_dynptr ptr_buf, ptr_xdp;
+ char data[] = "qwertyuiopasdfghjkl";
+ char buf[32] = {'\0'};
+ __u32 len = sizeof(data), xdp_data_size;
+ int i, chunks = 200;
+
+ /* ptr_xdp is backed by non-contiguous memory */
+ bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
+ xdp_data_size = bpf_dynptr_size(&ptr_xdp);
+ bpf_ringbuf_reserve_dynptr(&ringbuf, len * chunks, 0, &ptr_buf);
+
+ /* Destination dynptr is backed by non-contiguous memory */
+ bpf_for(i, 0, chunks) {
+ err = bpf_dynptr_write(&ptr_buf, i * len, data, len, 0);
+ if (err)
+ goto out;
+ }
+
+ err = bpf_dynptr_copy(&ptr_xdp, 0, &ptr_buf, 0, len * chunks);
+ if (err)
+ goto out;
+
+ bpf_for(i, 0, chunks) {
+ __builtin_memset(buf, 0, sizeof(buf));
+ err = bpf_dynptr_read(&buf, len, &ptr_xdp, i * len, 0);
+ if (err)
+ goto out;
+ if (bpf_memcmp(data, buf, len) != 0)
+ goto out;
+ }
+
+ /* Source dynptr is backed by non-contiguous memory */
+ __builtin_memset(buf, 0, sizeof(buf));
+ bpf_for(i, 0, chunks) {
+ err = bpf_dynptr_write(&ptr_buf, i * len, buf, len, 0);
+ if (err)
+ goto out;
+ }
+
+ err = bpf_dynptr_copy(&ptr_buf, 0, &ptr_xdp, 0, len * chunks);
+ if (err)
+ goto out;
+
+ bpf_for(i, 0, chunks) {
+ __builtin_memset(buf, 0, sizeof(buf));
+ err = bpf_dynptr_read(&buf, len, &ptr_buf, i * len, 0);
+ if (err)
+ goto out;
+ if (bpf_memcmp(data, buf, len) != 0)
+ goto out;
+ }
+
+ /* Both source and destination dynptrs are backed by non-contiguous memory */
+ err = bpf_dynptr_copy(&ptr_xdp, 2, &ptr_xdp, len, len * (chunks - 1));
+ if (err)
+ goto out;
+
+ bpf_for(i, 0, chunks - 1) {
+ __builtin_memset(buf, 0, sizeof(buf));
+ err = bpf_dynptr_read(&buf, len, &ptr_xdp, 2 + i * len, 0);
+ if (err)
+ goto out;
+ if (bpf_memcmp(data, buf, len) != 0)
+ goto out;
+ }
+
+ if (bpf_dynptr_copy(&ptr_xdp, xdp_data_size - 3000, &ptr_xdp, 0, len * chunks) != -E2BIG)
+ err = 1;
+
+out:
+ bpf_ringbuf_discard_dynptr(&ptr_buf, 0);
+ return XDP_DROP;
+}
+
+char memset_zero_data[] = "data to be zeroed";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_zero(void *ctx)
+{
+ __u32 data_sz = sizeof(memset_zero_data);
+ char zeroes[32] = {'\0'};
+ struct bpf_dynptr ptr;
+
+ err = bpf_dynptr_from_mem(memset_zero_data, data_sz, 0, &ptr);
+ err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, 0);
+ err = err ?: bpf_memcmp(zeroes, memset_zero_data, data_sz);
+
+ return 0;
+}
+
+#define DYNPTR_MEMSET_VAL 42
+
+char memset_notzero_data[] = "data to be overwritten";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_notzero(void *ctx)
+{
+ u32 data_sz = sizeof(memset_notzero_data);
+ struct bpf_dynptr ptr;
+ char expected[32];
+
+ __builtin_memset(expected, DYNPTR_MEMSET_VAL, data_sz);
+
+ err = bpf_dynptr_from_mem(memset_notzero_data, data_sz, 0, &ptr);
+ err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, DYNPTR_MEMSET_VAL);
+ err = err ?: bpf_memcmp(expected, memset_notzero_data, data_sz);
+
+ return 0;
+}
+
+char memset_zero_offset_data[] = "data to be zeroed partially";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_zero_offset(void *ctx)
+{
+ char expected[] = "data to \0\0\0\0eroed partially";
+ __u32 data_sz = sizeof(memset_zero_offset_data);
+ struct bpf_dynptr ptr;
+
+ err = bpf_dynptr_from_mem(memset_zero_offset_data, data_sz, 0, &ptr);
+ err = err ?: bpf_dynptr_memset(&ptr, 8, 4, 0);
+ err = err ?: bpf_memcmp(expected, memset_zero_offset_data, data_sz);
+
+ return 0;
+}
+
+char memset_zero_adjusted_data[] = "data to be zeroed partially";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_zero_adjusted(void *ctx)
+{
+ char expected[] = "data\0\0\0\0be zeroed partially";
+ __u32 data_sz = sizeof(memset_zero_adjusted_data);
+ struct bpf_dynptr ptr;
+
+ err = bpf_dynptr_from_mem(memset_zero_adjusted_data, data_sz, 0, &ptr);
+ err = err ?: bpf_dynptr_adjust(&ptr, 4, 8);
+ err = err ?: bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0);
+ err = err ?: bpf_memcmp(expected, memset_zero_adjusted_data, data_sz);
+
+ return 0;
+}
+
+char memset_overflow_data[] = "memset overflow data";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_overflow(void *ctx)
+{
+ __u32 data_sz = sizeof(memset_overflow_data);
+ struct bpf_dynptr ptr;
+ int ret;
+
+ err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr);
+ ret = bpf_dynptr_memset(&ptr, 0, data_sz + 1, 0);
+ if (ret != -E2BIG)
+ err = 1;
+
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_overflow_offset(void *ctx)
+{
+ __u32 data_sz = sizeof(memset_overflow_data);
+ struct bpf_dynptr ptr;
+ int ret;
+
+ err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr);
+ ret = bpf_dynptr_memset(&ptr, 1, data_sz, 0);
+ if (ret != -E2BIG)
+ err = 1;
+
+ return 0;
+}
+
+SEC("?cgroup_skb/egress")
+int test_dynptr_memset_readonly(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ int ret;
+
+ err = bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ /* cgroup skbs are read only, memset should fail */
+ ret = bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0);
+ if (ret != -EINVAL)
+ err = 1;
+
+ return 0;
+}
+
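+/* Local copy of the kernel's min_t() macro, used for the chunked reads below */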
+#define min_t(type, x, y) ({ \
+ type __x = (x); \
+ type __y = (y); \
+ __x < __y ? __x : __y; })
+
+SEC("xdp")
+int test_dynptr_memset_xdp_chunks(struct xdp_md *xdp)
+{
+ u32 data_sz, chunk_sz, offset = 0;
+ const int max_chunks = 200;
+ struct bpf_dynptr ptr_xdp;
+ char expected_buf[32];
+ char buf[32];
+ int i;
+
+ __builtin_memset(expected_buf, DYNPTR_MEMSET_VAL, sizeof(expected_buf));
+
+ /* ptr_xdp is backed by non-contiguous memory */
+ bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
+ data_sz = bpf_dynptr_size(&ptr_xdp);
+
+ err = bpf_dynptr_memset(&ptr_xdp, 0, data_sz, DYNPTR_MEMSET_VAL);
+ if (err) {
+		/* bpf_dynptr_memset() eventually calls bpf_xdp_pointer(),
+		 * which returns -EFAULT when data_sz is greater than 0xffff.
+		 * With a 64K page size, data_sz exceeds that limit, so the
+		 * error is expected; clear it and report success.
+		 */
+ if (data_sz >= PAGE_SIZE_64K)
+ err = 0;
+ goto out;
+ }
+
+ bpf_for(i, 0, max_chunks) {
+ offset = i * sizeof(buf);
+ if (offset >= data_sz)
+ goto out;
+ chunk_sz = min_t(u32, sizeof(buf), data_sz - offset);
+ err = bpf_dynptr_read(&buf, chunk_sz, &ptr_xdp, offset, 0);
+ if (err)
+ goto out;
+ err = bpf_memcmp(buf, expected_buf, sizeof(buf));
+ if (err)
+ goto out;
+ }
+out:
+ return XDP_DROP;
+}
+
+void *user_ptr;
+/* Contains a copy of the data pointed to by user_ptr.
+ * Size 384 makes it not fit into a single kernel chunk when copying,
+ * but stays below the maximum bpf stack size (512).
+ */
+char expected_str[384];
+__u32 test_len[7] = {0/* placeholder */, 0, 1, 2, 255, 256, 257};
+
+typedef int (*bpf_read_dynptr_fn_t)(struct bpf_dynptr *dptr, u32 off,
+ u32 size, const void *unsafe_ptr);
+
+/* Returns the offset just before the end of the maximum sized xdp fragment.
+ * Any write larger than 32 bytes will be split between 2 fragments.
+ */
+__u32 xdp_near_frag_end_offset(void)
+{
+ const __u32 headroom = 256;
+ const __u32 max_frag_size = __PAGE_SIZE - headroom - sizeof(struct skb_shared_info);
+
+ /* 32 bytes before the approximate end of the fragment */
+ return max_frag_size - 32;
+}
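+
+/* Worked example (illustrative; exact values are arch dependent): with a
+ * 4 KiB __PAGE_SIZE, 256 bytes of headroom and a struct skb_shared_info of
+ * roughly 320 bytes, max_frag_size is about 4096 - 256 - 320 = 3520 and the
+ * returned offset is about 3488, so any write longer than 32 bytes starting
+ * there spills into the next fragment.
+ */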
+
+/* Use __always_inline on test_dynptr_probe[_str][_xdp]() and callbacks
+ * of type bpf_read_dynptr_fn_t to prevent the compiler from generating
+ * indirect calls, which would make the program fail to load with an
+ * "unknown opcode" error.
+ */
+static __always_inline void test_dynptr_probe(void *ptr, bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
+{
+ char buf[sizeof(expected_str)];
+ struct bpf_dynptr ptr_buf;
+ int i;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return;
+
+ err = bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf);
+
+ bpf_for(i, 0, ARRAY_SIZE(test_len)) {
+ __u32 len = test_len[i];
+
+ err = err ?: bpf_read_dynptr_fn(&ptr_buf, 0, test_len[i], ptr);
+ if (len > sizeof(buf))
+ break;
+ err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0);
+
+ if (err || bpf_memcmp(expected_str, buf, len))
+ err = 1;
+
+ /* Reset buffer and dynptr */
+ __builtin_memset(buf, 0, sizeof(buf));
+ err = err ?: bpf_dynptr_write(&ptr_buf, 0, buf, len, 0);
+ }
+ bpf_ringbuf_discard_dynptr(&ptr_buf, 0);
+}
+
+static __always_inline void test_dynptr_probe_str(void *ptr,
+ bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
+{
+ char buf[sizeof(expected_str)];
+ struct bpf_dynptr ptr_buf;
+ __u32 cnt, i;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf);
+
+ bpf_for(i, 0, ARRAY_SIZE(test_len)) {
+ __u32 len = test_len[i];
+
+ cnt = bpf_read_dynptr_fn(&ptr_buf, 0, len, ptr);
+ if (cnt != len)
+ err = 1;
+
+ if (len > sizeof(buf))
+ continue;
+ err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0);
+ if (!len)
+ continue;
+ if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0')
+ err = 1;
+ }
+ bpf_ringbuf_discard_dynptr(&ptr_buf, 0);
+}
+
+static __always_inline void test_dynptr_probe_xdp(struct xdp_md *xdp, void *ptr,
+ bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
+{
+ struct bpf_dynptr ptr_xdp;
+ char buf[sizeof(expected_str)];
+ __u32 off, i;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return;
+
+ off = xdp_near_frag_end_offset();
+ err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
+
+ bpf_for(i, 0, ARRAY_SIZE(test_len)) {
+ __u32 len = test_len[i];
+
+ err = err ?: bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr);
+ if (len > sizeof(buf))
+ continue;
+ err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0);
+ if (err || bpf_memcmp(expected_str, buf, len))
+ err = 1;
+ /* Reset buffer and dynptr */
+ __builtin_memset(buf, 0, sizeof(buf));
+ err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0);
+ }
+}
+
+static __always_inline void test_dynptr_probe_str_xdp(struct xdp_md *xdp, void *ptr,
+ bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
+{
+ struct bpf_dynptr ptr_xdp;
+ char buf[sizeof(expected_str)];
+ __u32 cnt, off, i;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return;
+
+ off = xdp_near_frag_end_offset();
+ err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
+ if (err)
+ return;
+
+ bpf_for(i, 0, ARRAY_SIZE(test_len)) {
+ __u32 len = test_len[i];
+
+ cnt = bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr);
+ if (cnt != len)
+ err = 1;
+
+ if (len > sizeof(buf))
+ continue;
+ err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0);
+
+ if (!len)
+ continue;
+ if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0')
+ err = 1;
+
+ __builtin_memset(buf, 0, sizeof(buf));
+ err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0);
+ }
+}
+
+SEC("xdp")
+int test_probe_read_user_dynptr(struct xdp_md *xdp)
+{
+ test_dynptr_probe(user_ptr, bpf_probe_read_user_dynptr);
+ if (!err)
+ test_dynptr_probe_xdp(xdp, user_ptr, bpf_probe_read_user_dynptr);
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int test_probe_read_kernel_dynptr(struct xdp_md *xdp)
+{
+ test_dynptr_probe(expected_str, bpf_probe_read_kernel_dynptr);
+ if (!err)
+ test_dynptr_probe_xdp(xdp, expected_str, bpf_probe_read_kernel_dynptr);
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int test_probe_read_user_str_dynptr(struct xdp_md *xdp)
+{
+ test_dynptr_probe_str(user_ptr, bpf_probe_read_user_str_dynptr);
+ if (!err)
+ test_dynptr_probe_str_xdp(xdp, user_ptr, bpf_probe_read_user_str_dynptr);
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int test_probe_read_kernel_str_dynptr(struct xdp_md *xdp)
+{
+ test_dynptr_probe_str(expected_str, bpf_probe_read_kernel_str_dynptr);
+ if (!err)
+ test_dynptr_probe_str_xdp(xdp, expected_str, bpf_probe_read_kernel_str_dynptr);
+ return XDP_PASS;
+}
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int test_copy_from_user_dynptr(void *ctx)
+{
+ test_dynptr_probe(user_ptr, bpf_copy_from_user_dynptr);
+ return 0;
+}
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int test_copy_from_user_str_dynptr(void *ctx)
+{
+ test_dynptr_probe_str(user_ptr, bpf_copy_from_user_str_dynptr);
+ return 0;
+}
+
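+/* Thin adapters: bind the current task so the five-argument
+ * bpf_copy_from_user_task{,_str}_dynptr() kfuncs fit the four-argument
+ * bpf_read_dynptr_fn_t signature used by the probe helpers above.
+ */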
+static int bpf_copy_data_from_user_task(struct bpf_dynptr *dptr, u32 off,
+ u32 size, const void *unsafe_ptr)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+
+ return bpf_copy_from_user_task_dynptr(dptr, off, size, unsafe_ptr, task);
+}
+
+static int bpf_copy_data_from_user_task_str(struct bpf_dynptr *dptr, u32 off,
+ u32 size, const void *unsafe_ptr)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+
+ return bpf_copy_from_user_task_str_dynptr(dptr, off, size, unsafe_ptr, task);
+}
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int test_copy_from_user_task_dynptr(void *ctx)
+{
+ test_dynptr_probe(user_ptr, bpf_copy_data_from_user_task);
+ return 0;
+}
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int test_copy_from_user_task_str_dynptr(void *ctx)
+{
+ test_dynptr_probe_str(user_ptr, bpf_copy_data_from_user_task_str);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/empty_skb.c b/tools/testing/selftests/bpf/progs/empty_skb.c
new file mode 100644
index 000000000000..4b0cd6753251
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/empty_skb.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+char _license[] SEC("license") = "GPL";
+
+int ifindex;
+int ret;
+
+SEC("lwt_xmit")
+int redirect_ingress(struct __sk_buff *skb)
+{
+ ret = bpf_clone_redirect(skb, ifindex, BPF_F_INGRESS);
+ return 0;
+}
+
+SEC("lwt_xmit")
+int redirect_egress(struct __sk_buff *skb)
+{
+ ret = bpf_clone_redirect(skb, ifindex, 0);
+ return 0;
+}
+
+SEC("tc")
+int tc_redirect_ingress(struct __sk_buff *skb)
+{
+ ret = bpf_clone_redirect(skb, ifindex, BPF_F_INGRESS);
+ return 0;
+}
+
+SEC("tc")
+int tc_redirect_egress(struct __sk_buff *skb)
+{
+ ret = bpf_clone_redirect(skb, ifindex, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/epilogue_exit.c b/tools/testing/selftests/bpf/progs/epilogue_exit.c
new file mode 100644
index 000000000000..35fec7c75bef
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/epilogue_exit.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__success
+/* save __u64 *ctx to stack */
+__xlated("0: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("1: r1 = *(u64 *)(r1 +0)")
+__xlated("2: r2 = *(u64 *)(r1 +0)")
+__xlated("3: r3 = 0")
+__xlated("4: r4 = 1")
+__xlated("5: if r2 == 0x0 goto pc+10")
+__xlated("6: r0 = 0")
+__xlated("7: *(u64 *)(r1 +0) = r3")
+/* epilogue */
+__xlated("8: r1 = *(u64 *)(r10 -8)")
+__xlated("9: r1 = *(u64 *)(r1 +0)")
+__xlated("10: r6 = *(u64 *)(r1 +0)")
+__xlated("11: r6 += 10000")
+__xlated("12: *(u64 *)(r1 +0) = r6")
+__xlated("13: r0 = r6")
+__xlated("14: r0 *= 2")
+__xlated("15: exit")
+/* 2nd part of the main prog after the first exit */
+__xlated("16: *(u64 *)(r1 +0) = r4")
+__xlated("17: r0 = 1")
+/* Clear r1 to catch an off-by-one in the jump: it must land at the
+ * very beginning of the epilogue, which re-initializes r1 with the
+ * ctx ptr.
+ */
+__xlated("18: r1 = 0")
+__xlated("19: gotol pc-12")
+SEC("struct_ops/test_epilogue_exit")
+__naked int test_epilogue_exit(void)
+{
+ asm volatile (
+ "r1 = *(u64 *)(r1 +0);"
+ "r2 = *(u64 *)(r1 +0);"
+ "r3 = 0;"
+ "r4 = 1;"
+ "if r2 == 0 goto +3;"
+ "r0 = 0;"
+ "*(u64 *)(r1 + 0) = r3;"
+ "exit;"
+ "*(u64 *)(r1 + 0) = r4;"
+ "r0 = 1;"
+ "r1 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops epilogue_exit = {
+ .test_epilogue = (void *)test_epilogue_exit,
+};
+
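+/* Expected results, following the xlated listing above: with a = 1 the
+ * branch falls through, a is cleared, and the epilogue computes
+ * (0 + 10000) * 2 = 20000; with a = 0 the branch is taken, a is set to 1,
+ * and the epilogue computes (1 + 10000) * 2 = 20002.
+ */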
+SEC("syscall")
+__retval(20000)
+int syscall_epilogue_exit0(void *ctx)
+{
+ struct st_ops_args args = { .a = 1 };
+
+ return bpf_kfunc_st_ops_test_epilogue(&args);
+}
+
+SEC("syscall")
+__retval(20002)
+int syscall_epilogue_exit1(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_epilogue(&args);
+}
diff --git a/tools/testing/selftests/bpf/progs/epilogue_tailcall.c b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c
new file mode 100644
index 000000000000..153514691ba4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+static __noinline __used int subprog(struct st_ops_args *args)
+{
+ args->a += 1;
+ return args->a;
+}
+
+SEC("struct_ops/test_epilogue_subprog")
+int BPF_PROG(test_epilogue_subprog, struct st_ops_args *args)
+{
+ subprog(args);
+ return args->a;
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+ __array(values, void (void));
+} epilogue_map SEC(".maps") = {
+ .values = {
+ [0] = (void *)&test_epilogue_subprog,
+ }
+};
+
+SEC("struct_ops/test_epilogue_tailcall")
+int test_epilogue_tailcall(unsigned long long *ctx)
+{
+ bpf_tail_call(ctx, &epilogue_map, 0);
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops epilogue_tailcall = {
+ .test_epilogue = (void *)test_epilogue_tailcall,
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops epilogue_subprog = {
+ .test_epilogue = (void *)test_epilogue_subprog,
+};
+
+SEC("syscall")
+int syscall_epilogue_tailcall(struct st_ops_args *args)
+{
+ return bpf_kfunc_st_ops_test_epilogue(args);
+}
diff --git a/tools/testing/selftests/bpf/progs/err.h b/tools/testing/selftests/bpf/progs/err.h
new file mode 100644
index 000000000000..38529779a236
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/err.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ERR_H__
+#define __ERR_H__
+
+#define MAX_ERRNO 4095
+#define IS_ERR_VALUE(x) ((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
+
+#define __STR(x) #x
+
+#define set_if_not_errno_or_zero(x, y) \
+({ \
+ asm volatile ("if %0 s< -4095 goto +1\n" \
+ "if %0 s<= 0 goto +1\n" \
+ "%0 = " __STR(y) "\n" \
+ : "+r"(x)); \
+})
+
+static inline int IS_ERR_OR_NULL(const void *ptr)
+{
+ return !ptr || IS_ERR_VALUE((unsigned long)ptr);
+}
+
+static inline long PTR_ERR(const void *ptr)
+{
+ return (long) ptr;
+}
+
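+/* Typical usage sketch (the kfunc name here is hypothetical):
+ *
+ *	ptr = some_kfunc_returning_err_ptr();
+ *	if (IS_ERR_OR_NULL(ptr)) {
+ *		ret = PTR_ERR(ptr);
+ *		set_if_not_errno_or_zero(ret, -EINVAL);
+ *		return ret;
+ *	}
+ */
+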
+#endif /* __ERR_H__ */
diff --git a/tools/testing/selftests/bpf/progs/exceptions.c b/tools/testing/selftests/bpf/progs/exceptions.c
new file mode 100644
index 000000000000..f09cd14d8e04
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+#ifndef ETH_P_IP
+#define ETH_P_IP 0x0800
+#endif
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 4);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+static __noinline int static_func(u64 i)
+{
+ bpf_throw(32);
+ return i;
+}
+
+__noinline int global2static_simple(u64 i)
+{
+ static_func(i + 2);
+ return i - 1;
+}
+
+__noinline int global2static(u64 i)
+{
+ if (i == ETH_P_IP)
+ bpf_throw(16);
+ return static_func(i);
+}
+
+static __noinline int static2global(u64 i)
+{
+ return global2static(i) + i;
+}
+
+SEC("tc")
+int exception_throw_always_1(struct __sk_buff *ctx)
+{
+ bpf_throw(64);
+ return 0;
+}
+
+/* In this case, the global func will never be seen executing after the call
+ * to the static subprog, hence the verifier will DCE the remaining
+ * instructions. Ensure we are resilient to that.
+ */
+SEC("tc")
+int exception_throw_always_2(struct __sk_buff *ctx)
+{
+ return global2static_simple(ctx->protocol);
+}
+
+SEC("tc")
+int exception_throw_unwind_1(struct __sk_buff *ctx)
+{
+ return static2global(bpf_ntohs(ctx->protocol));
+}
+
+SEC("tc")
+int exception_throw_unwind_2(struct __sk_buff *ctx)
+{
+ return static2global(bpf_ntohs(ctx->protocol) - 1);
+}
+
+SEC("tc")
+int exception_throw_default(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 1;
+}
+
+SEC("tc")
+int exception_throw_default_value(struct __sk_buff *ctx)
+{
+ bpf_throw(5);
+ return 1;
+}
+
+SEC("tc")
+int exception_tail_call_target(struct __sk_buff *ctx)
+{
+ bpf_throw(16);
+ return 0;
+}
+
+static __noinline
+int exception_tail_call_subprog(struct __sk_buff *ctx)
+{
+ volatile int ret = 10;
+
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return ret;
+}
+
+SEC("tc")
+int exception_tail_call(struct __sk_buff *ctx) {
+ volatile int ret = 0;
+
+ ret = exception_tail_call_subprog(ctx);
+ return ret + 8;
+}
+
+__noinline int exception_ext_global(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ return ret;
+}
+
+static __noinline int exception_ext_static(struct __sk_buff *ctx)
+{
+ return exception_ext_global(ctx);
+}
+
+SEC("tc")
+int exception_ext(struct __sk_buff *ctx)
+{
+ return exception_ext_static(ctx);
+}
+
+__noinline int exception_cb_mod_global(u64 cookie)
+{
+ volatile int ret = 0;
+
+ return ret;
+}
+
+/* Example of how the exception callback supplied during verification can still
+ * introduce extensions by calling into dummy global functions, and alter runtime
+ * behavior.
+ *
+ * Right now we don't allow freplace attachment to exception callback itself,
+ * but if the need arises this restriction is technically feasible to relax in
+ * the future.
+ */
+__noinline int exception_cb_mod(u64 cookie)
+{
+ return exception_cb_mod_global(cookie) + cookie + 10;
+}
+
+SEC("tc")
+__exception_cb(exception_cb_mod)
+int exception_ext_mod_cb_runtime(struct __sk_buff *ctx)
+{
+ bpf_throw(25);
+ return 0;
+}
+
+__noinline static int subprog(struct __sk_buff *ctx)
+{
+ return bpf_ktime_get_ns();
+}
+
+__noinline static int throwing_subprog(struct __sk_buff *ctx)
+{
+ if (ctx->tstamp)
+ bpf_throw(0);
+ return bpf_ktime_get_ns();
+}
+
+__noinline int global_subprog(struct __sk_buff *ctx)
+{
+ return bpf_ktime_get_ns();
+}
+
+__noinline int throwing_global_subprog(struct __sk_buff *ctx)
+{
+ if (ctx->tstamp)
+ bpf_throw(0);
+ return bpf_ktime_get_ns();
+}
+
+SEC("tc")
+int exception_throw_subprog(struct __sk_buff *ctx)
+{
+ switch (ctx->protocol) {
+ case 1:
+ return subprog(ctx);
+ case 2:
+ return global_subprog(ctx);
+ case 3:
+ return throwing_subprog(ctx);
+ case 4:
+ return throwing_global_subprog(ctx);
+ default:
+ break;
+ }
+ bpf_throw(1);
+ return 0;
+}
+
+__noinline int assert_nz_gfunc(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert(cookie != 0);
+ return 0;
+}
+
+__noinline int assert_zero_gfunc(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert(bpf_cmp_unlikely(cookie, ==, 0));
+ return 0;
+}
+
+__noinline int assert_neg_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert(bpf_cmp_unlikely(cookie, <, 0));
+ return 0;
+}
+
+__noinline int assert_pos_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert(bpf_cmp_unlikely(cookie, >, 0));
+ return 0;
+}
+
+__noinline int assert_negeq_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert(bpf_cmp_unlikely(cookie, <=, -1));
+ return 0;
+}
+
+__noinline int assert_poseq_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert(bpf_cmp_unlikely(cookie, >=, 1));
+ return 0;
+}
+
+__noinline int assert_nz_gfunc_with(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert_with(cookie != 0, cookie + 100);
+ return 0;
+}
+
+__noinline int assert_zero_gfunc_with(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert_with(bpf_cmp_unlikely(cookie, ==, 0), cookie + 100);
+ return 0;
+}
+
+__noinline int assert_neg_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_with(bpf_cmp_unlikely(cookie, <, 0), cookie + 100);
+ return 0;
+}
+
+__noinline int assert_pos_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_with(bpf_cmp_unlikely(cookie, >, 0), cookie + 100);
+ return 0;
+}
+
+__noinline int assert_negeq_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_with(bpf_cmp_unlikely(cookie, <=, -1), cookie + 100);
+ return 0;
+}
+
+__noinline int assert_poseq_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_with(bpf_cmp_unlikely(cookie, >=, 1), cookie + 100);
+ return 0;
+}
+
+#define check_assert(name, cookie, tag) \
+SEC("tc") \
+int exception##tag##name(struct __sk_buff *ctx) \
+{ \
+ return name(cookie) + 1; \
+}
+
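+/* For example, check_assert(assert_nz_gfunc, 5, _) expands to:
+ *
+ *	SEC("tc")
+ *	int exception_assert_nz_gfunc(struct __sk_buff *ctx)
+ *	{
+ *		return assert_nz_gfunc(5) + 1;
+ *	}
+ *
+ * The _bad_ variants pass a cookie that violates the assertion, so the
+ * called subprog throws instead of returning.
+ */
+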
+check_assert(assert_nz_gfunc, 5, _);
+check_assert(assert_zero_gfunc, 0, _);
+check_assert(assert_neg_gfunc, -100, _);
+check_assert(assert_pos_gfunc, 100, _);
+check_assert(assert_negeq_gfunc, -1, _);
+check_assert(assert_poseq_gfunc, 1, _);
+
+check_assert(assert_nz_gfunc_with, 5, _);
+check_assert(assert_zero_gfunc_with, 0, _);
+check_assert(assert_neg_gfunc_with, -100, _);
+check_assert(assert_pos_gfunc_with, 100, _);
+check_assert(assert_negeq_gfunc_with, -1, _);
+check_assert(assert_poseq_gfunc_with, 1, _);
+
+check_assert(assert_nz_gfunc, 0, _bad_);
+check_assert(assert_zero_gfunc, 5, _bad_);
+check_assert(assert_neg_gfunc, 100, _bad_);
+check_assert(assert_pos_gfunc, -100, _bad_);
+check_assert(assert_negeq_gfunc, 1, _bad_);
+check_assert(assert_poseq_gfunc, -1, _bad_);
+
+check_assert(assert_nz_gfunc_with, 0, _bad_);
+check_assert(assert_zero_gfunc_with, 5, _bad_);
+check_assert(assert_neg_gfunc_with, 100, _bad_);
+check_assert(assert_pos_gfunc_with, -100, _bad_);
+check_assert(assert_negeq_gfunc_with, 1, _bad_);
+check_assert(assert_poseq_gfunc_with, -1, _bad_);
+
+SEC("tc")
+int exception_assert_range(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range(time, 0, ~0ULL);
+ return 1;
+}
+
+SEC("tc")
+int exception_assert_range_with(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range_with(time, 0, ~0ULL, 10);
+ return 1;
+}
+
+SEC("tc")
+int exception_bad_assert_range(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range(time, -100, 100);
+ return 1;
+}
+
+SEC("tc")
+int exception_bad_assert_range_with(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range_with(time, -1000, 1000, 10);
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/exceptions_assert.c b/tools/testing/selftests/bpf/progs/exceptions_assert.c
new file mode 100644
index 000000000000..5e0a1ca96d4e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions_assert.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <limits.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+#define check_assert(type, op, name, value) \
+ SEC("?tc") \
+ __log_level(2) __failure \
+ int check_assert_##name(void *ctx) \
+ { \
+ type num = bpf_ktime_get_ns(); \
+ bpf_assert(bpf_cmp_unlikely(num, op, value)); \
+ return *(u64 *)num; \
+ }
+
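+/* Each generated program dereferences the scalar num on purpose: the load
+ * through a non-pointer fails verification (hence __failure), and with
+ * __log_level(2) the verifier log contains the register state that the
+ * __msg() patterns below are matched against, i.e. the bounds established
+ * by bpf_assert()/bpf_cmp_unlikely().
+ */
+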
+__msg(": R0_w=0xffffffff80000000")
+check_assert(s64, ==, eq_int_min, INT_MIN);
+__msg(": R0_w=0x7fffffff")
+check_assert(s64, ==, eq_int_max, INT_MAX);
+__msg(": R0_w=0")
+check_assert(s64, ==, eq_zero, 0);
+__msg(": R0_w=0x8000000000000000 R1_w=0x8000000000000000")
+check_assert(s64, ==, eq_llong_min, LLONG_MIN);
+__msg(": R0_w=0x7fffffffffffffff R1_w=0x7fffffffffffffff")
+check_assert(s64, ==, eq_llong_max, LLONG_MAX);
+
+__msg(": R0_w=scalar(id=1,smax=0x7ffffffe)")
+check_assert(s64, <, lt_pos, INT_MAX);
+__msg(": R0_w=scalar(id=1,smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
+check_assert(s64, <, lt_zero, 0);
+__msg(": R0_w=scalar(id=1,smax=0xffffffff7fffffff")
+check_assert(s64, <, lt_neg, INT_MIN);
+
+__msg(": R0_w=scalar(id=1,smax=0x7fffffff)")
+check_assert(s64, <=, le_pos, INT_MAX);
+__msg(": R0_w=scalar(id=1,smax=0)")
+check_assert(s64, <=, le_zero, 0);
+__msg(": R0_w=scalar(id=1,smax=0xffffffff80000000")
+check_assert(s64, <=, le_neg, INT_MIN);
+
+__msg(": R0_w=scalar(id=1,smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, >, gt_pos, INT_MAX);
+__msg(": R0_w=scalar(id=1,smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, >, gt_zero, 0);
+__msg(": R0_w=scalar(id=1,smin=0xffffffff80000001")
+check_assert(s64, >, gt_neg, INT_MIN);
+
+__msg(": R0_w=scalar(id=1,smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, >=, ge_pos, INT_MAX);
+__msg(": R0_w=scalar(id=1,smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, >=, ge_zero, 0);
+__msg(": R0_w=scalar(id=1,smin=0xffffffff80000000")
+check_assert(s64, >=, ge_neg, INT_MIN);
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R0=0 R1=ctx() R2=scalar(smin=0xffffffff80000002,smax=smax32=0x7ffffffd,smin32=0x80000002) R10=fp0")
+int check_assert_range_s64(struct __sk_buff *ctx)
+{
+ struct bpf_sock *sk = ctx->sk;
+ s64 num;
+
+ _Static_assert(_Generic((sk->rx_queue_mapping), s32: 1, default: 0), "type match");
+ if (!sk)
+ return 0;
+ num = sk->rx_queue_mapping;
+ bpf_assert_range(num, INT_MIN + 2, INT_MAX - 2);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R1=ctx() R2=scalar(smin=umin=smin32=umin32=4096,smax=umax=smax32=umax32=8192,var_off=(0x0; 0x3fff))")
+int check_assert_range_u64(struct __sk_buff *ctx)
+{
+ u64 num = ctx->len;
+
+ bpf_assert_range(num, 4096, 8192);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R0=0 R1=ctx() R2=4096 R10=fp0")
+int check_assert_single_range_s64(struct __sk_buff *ctx)
+{
+ struct bpf_sock *sk = ctx->sk;
+ s64 num;
+
+ _Static_assert(_Generic((sk->rx_queue_mapping), s32: 1, default: 0), "type match");
+ if (!sk)
+ return 0;
+ num = sk->rx_queue_mapping;
+
+ bpf_assert_range(num, 4096, 4096);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R1=ctx() R2=4096 R10=fp0")
+int check_assert_single_range_u64(struct __sk_buff *ctx)
+{
+ u64 num = ctx->len;
+
+ bpf_assert_range(num, 4096, 4096);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R1=pkt(off=64,r=64) R2=pkt_end() R6=pkt(r=64) R10=fp0")
+int check_assert_generic(struct __sk_buff *ctx)
+{
+ u8 *data_end = (void *)(long)ctx->data_end;
+ u8 *data = (void *)(long)ctx->data;
+
+ bpf_assert(data + 64 <= data_end);
+ return data[128];
+}
+
+SEC("?fentry/bpf_check")
+__failure __msg("At program exit the register R1 has smin=64 smax=64")
+int check_assert_with_return(void *ctx)
+{
+ bpf_assert_with(!ctx, 64);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/exceptions_ext.c b/tools/testing/selftests/bpf/progs/exceptions_ext.c
new file mode 100644
index 000000000000..743c05185d9b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions_ext.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_experimental.h"
+
+SEC("?fentry")
+int pfentry(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fentry")
+int throwing_fentry(void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline int exception_cb(u64 cookie)
+{
+ return cookie + 64;
+}
+
+SEC("?freplace")
+int extension(struct __sk_buff *ctx)
+{
+ return 0;
+}
+
+SEC("?freplace")
+__exception_cb(exception_cb)
+int throwing_exception_cb_extension(u64 cookie)
+{
+ bpf_throw(32);
+ return 0;
+}
+
+SEC("?freplace")
+__exception_cb(exception_cb)
+int throwing_extension(struct __sk_buff *ctx)
+{
+ bpf_throw(64);
+ return 0;
+}
+
+SEC("?fexit")
+int pfexit(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fexit")
+int throwing_fexit(void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?fmod_ret")
+int pfmod_ret(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fmod_ret")
+int throwing_fmod_ret(void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/exceptions_fail.c b/tools/testing/selftests/bpf/progs/exceptions_fail.c
new file mode 100644
index 000000000000..8a0fdff89927
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions_fail.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+extern void bpf_rcu_read_lock(void) __ksym;
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+
+struct foo {
+ struct bpf_rb_node node;
+};
+
+struct hmap_elem {
+ struct bpf_timer timer;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 64);
+ __type(key, int);
+ __type(value, struct hmap_elem);
+} hmap SEC(".maps");
+
+private(A) struct bpf_spin_lock lock;
+private(A) struct bpf_rb_root rbtree __contains(foo, node);
+
+__noinline void *exception_cb_bad_ret_type(u64 cookie)
+{
+ return NULL;
+}
+
+__noinline int exception_cb_bad_arg_0(void)
+{
+ return 0;
+}
+
+__noinline int exception_cb_bad_arg_2(int a, int b)
+{
+ return 0;
+}
+
+__noinline int exception_cb_ok_arg_small(int a)
+{
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_bad_ret_type)
+__failure __msg("Global function exception_cb_bad_ret_type() doesn't return scalar.")
+int reject_exception_cb_type_1(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_bad_arg_0)
+__failure __msg("exception cb only supports single integer argument")
+int reject_exception_cb_type_2(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_bad_arg_2)
+__failure __msg("exception cb only supports single integer argument")
+int reject_exception_cb_type_3(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_ok_arg_small)
+__success
+int reject_exception_cb_type_4(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline
+static int timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback subprog")
+int reject_async_callback_throw(struct __sk_buff *ctx)
+{
+ struct hmap_elem *elem;
+
+ elem = bpf_map_lookup_elem(&hmap, &(int){0});
+ if (!elem)
+ return 0;
+ return bpf_timer_set_callback(&elem->timer, timer_cb);
+}
+
+__noinline static int subprog_lock(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_spin_lock(&lock);
+ if (ctx->len)
+ bpf_throw(0);
+ return ret;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_with_lock(void *ctx)
+{
+ bpf_spin_lock(&lock);
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_subprog_with_lock(void *ctx)
+{
+ return subprog_lock(ctx);
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region")
+int reject_with_rcu_read_lock(void *ctx)
+{
+ bpf_rcu_read_lock();
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int throwing_subprog(struct __sk_buff *ctx)
+{
+ if (ctx->len)
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region")
+int reject_subprog_with_rcu_read_lock(void *ctx)
+{
+ bpf_rcu_read_lock();
+ return throwing_subprog(ctx);
+}
+
+static bool rbless(struct bpf_rb_node *n1, const struct bpf_rb_node *n2)
+{
+ bpf_throw(0);
+ return true;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_with_rbtree_add_throw(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&lock);
+ bpf_rbtree_add(&rbtree, &f->node, rbless);
+ bpf_spin_unlock(&lock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference")
+int reject_with_reference(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int subprog_ref(struct __sk_buff *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int subprog_cb_ref(u32 i, void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference")
+int reject_with_cb_reference(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_loop(5, subprog_cb_ref, NULL, 0);
+ bpf_obj_drop(f);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback")
+int reject_with_cb(void *ctx)
+{
+ bpf_loop(5, subprog_cb_ref, NULL, 0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference")
+int reject_with_subprog_reference(void *ctx)
+{
+ return subprog_ref(ctx) + 1;
+}
+
+__noinline int throwing_exception_cb(u64 c)
+{
+ bpf_throw(0);
+ return c;
+}
+
+__noinline int exception_cb1(u64 c)
+{
+ return c;
+}
+
+__noinline int exception_cb2(u64 c)
+{
+ return c;
+}
+
+static __noinline int static_func(struct __sk_buff *ctx)
+{
+ return exception_cb1(ctx->tstamp);
+}
+
+__noinline int global_func(struct __sk_buff *ctx)
+{
+ return exception_cb1(ctx->tstamp);
+}
+
+SEC("?tc")
+__exception_cb(throwing_exception_cb)
+__failure __msg("cannot be called from callback subprog")
+int reject_throwing_exception_cb(struct __sk_buff *ctx)
+{
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb1)
+__failure __msg("cannot call exception cb directly")
+int reject_exception_cb_call_global_func(struct __sk_buff *ctx)
+{
+ return global_func(ctx);
+}
+
+SEC("?tc")
+__exception_cb(exception_cb1)
+__failure __msg("cannot call exception cb directly")
+int reject_exception_cb_call_static_func(struct __sk_buff *ctx)
+{
+ return static_func(ctx);
+}
+
+SEC("?tc")
+__exception_cb(exception_cb1)
+__exception_cb(exception_cb2)
+__failure __msg("multiple exception callback tags for main subprog")
+int reject_multiple_exception_cb(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 16;
+}
+
+__noinline int exception_cb_bad_ret(u64 c)
+{
+ return c;
+}
+
+SEC("?fentry/bpf_check")
+__exception_cb(exception_cb_bad_ret)
+__failure __msg("At program exit the register R0 has unknown scalar value should")
+int reject_set_exception_cb_bad_ret1(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fentry/bpf_check")
+__failure __msg("At program exit the register R1 has smin=64 smax=64 should")
+int reject_set_exception_cb_bad_ret2(void *ctx)
+{
+ bpf_throw(64);
+ return 0;
+}
+
+__noinline static int loop_cb1(u32 index, int *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int loop_cb2(u32 index, int *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback")
+int reject_exception_throw_cb(struct __sk_buff *ctx)
+{
+ bpf_loop(5, loop_cb1, NULL, 0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback")
+int reject_exception_throw_cb_diff(struct __sk_buff *ctx)
+{
+ if (ctx->protocol)
+ bpf_loop(5, loop_cb1, NULL, 0);
+ else
+ bpf_loop(5, loop_cb2, NULL, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/exhandler_kern.c b/tools/testing/selftests/bpf/progs/exhandler_kern.c
new file mode 100644
index 000000000000..20d009e2d266
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exhandler_kern.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021, Oracle and/or its affiliates. */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+unsigned int exception_triggered;
+int test_pid;
+
+/* TRACE_EVENT(task_newtask,
+ * TP_PROTO(struct task_struct *p, u64 clone_flags)
+ */
+SEC("tp_btf/task_newtask")
+int BPF_PROG(trace_task_newtask, struct task_struct *task, u64 clone_flags)
+{
+ int pid = bpf_get_current_pid_tgid() >> 32;
+ struct callback_head *work;
+ void *func;
+
+ if (test_pid != pid)
+ return 0;
+
+ /* To verify we hit an exception we dereference task->task_works->func.
+ * If task work has been added,
+ * - task->task_works is non-NULL; and
+ * - task->task_works->func is non-NULL also (the callback function
+ * must be specified for the task work).
+ *
+ * However, for a newly-created task, task->task_works is NULL,
+ * so we know the exception handler triggered if task_works is
+ * NULL and func is NULL.
+ */
+ work = task->task_works;
+ func = work->func;
+	/* Currently the verifier will fail on a `btf_ptr |= btf_ptr` instruction.
+	 * To work around the issue, use barrier_var() and rewrite as below to
+	 * prevent the compiler from generating verifier-unfriendly code.
+	 */
+ barrier_var(work);
+ if (work)
+ return 0;
+ barrier_var(func);
+ if (func)
+ return 0;
+ exception_triggered++;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/fd_htab_lookup.c b/tools/testing/selftests/bpf/progs/fd_htab_lookup.c
new file mode 100644
index 000000000000..a4a9e1db626f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fd_htab_lookup.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025. Huawei Technologies Co., Ltd */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct inner_map_type {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, 4);
+ __uint(value_size, 4);
+ __uint(max_entries, 1);
+} inner_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 64);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct inner_map_type);
+} outer_map SEC(".maps") = {
+ .values = {
+ [0] = &inner_map,
+ },
+};
diff --git a/tools/testing/selftests/bpf/progs/fentry_many_args.c b/tools/testing/selftests/bpf/progs/fentry_many_args.c
new file mode 100644
index 000000000000..b61bb92fee2c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fentry_many_args.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Tencent */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 test1_result = 0;
+SEC("fentry/bpf_testmod_fentry_test7")
+int BPF_PROG(test1, __u64 a, void *b, short c, int d, void *e, char f,
+ int g)
+{
+ test1_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
+ e == (void *)20 && f == 21 && g == 22;
+ return 0;
+}
+
+__u64 test2_result = 0;
+SEC("fentry/bpf_testmod_fentry_test11")
+int BPF_PROG(test2, __u64 a, void *b, short c, int d, void *e, char f,
+ int g, unsigned int h, long i, __u64 j, unsigned long k)
+{
+ test2_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
+ e == (void *)20 && f == 21 && g == 22 && h == 23 &&
+ i == 24 && j == 25 && k == 26;
+ return 0;
+}
+
+__u64 test3_result = 0;
+SEC("fentry/bpf_testmod_fentry_test11")
+int BPF_PROG(test3, __u64 a, __u64 b, __u64 c, __u64 d, __u64 e, __u64 f,
+ __u64 g, __u64 h, __u64 i, __u64 j, __u64 k)
+{
+ test3_result = a == 16 && b == 17 && c == 18 && d == 19 &&
+ e == 20 && f == 21 && g == 22 && h == 23 &&
+ i == 24 && j == 25 && k == 26;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/fentry_recursive.c b/tools/testing/selftests/bpf/progs/fentry_recursive.c
new file mode 100644
index 000000000000..2c9fb5ac42b2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fentry_recursive.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Red Hat, Inc. */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* Dummy fentry bpf prog for testing fentry attachment chains */
+SEC("fentry/XXX")
+int BPF_PROG(recursive_attach, int a)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/fentry_recursive_target.c b/tools/testing/selftests/bpf/progs/fentry_recursive_target.c
new file mode 100644
index 000000000000..267c876d0aba
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fentry_recursive_target.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Red Hat, Inc. */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* Dummy fentry bpf prog for testing fentry attachment chains. It's going to be
+ * the start of the chain.
+ */
+SEC("fentry/bpf_testmod_fentry_test1")
+int BPF_PROG(test1, int a)
+{
+ return 0;
+}
+
+/* Dummy bpf prog for testing attach_btf presence when attaching an fentry
+ * program.
+ */
+SEC("raw_tp/sys_enter")
+int BPF_PROG(fentry_target, struct pt_regs *regs, long id)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/fentry_test.c b/tools/testing/selftests/bpf/progs/fentry_test.c
index 5f645fdaba6f..52a550d281d9 100644
--- a/tools/testing/selftests/bpf/progs/fentry_test.c
+++ b/tools/testing/selftests/bpf/progs/fentry_test.c
@@ -64,7 +64,7 @@ __u64 test7_result = 0;
SEC("fentry/bpf_fentry_test7")
int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
{
- if (arg == 0)
+ if (!arg)
test7_result = 1;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
index 49a84a3a2306..983b7c233382 100644
--- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
@@ -73,10 +73,10 @@ int test_subprog2(struct args_subprog2 *ctx)
__builtin_preserve_access_index(&skb->len));
ret = ctx->ret;
- /* bpf_prog_load() loads "test_pkt_access.o" with BPF_F_TEST_RND_HI32
- * which randomizes upper 32 bits after BPF_ALU32 insns.
- * Hence after 'w0 <<= 1' upper bits of $rax are random.
- * That is expected and correct. Trim them.
+ /* bpf_prog_test_load() loads "test_pkt_access.bpf.o" with
+ * BPF_F_TEST_RND_HI32 which randomizes upper 32 bits after BPF_ALU32
+ * insns. Hence after 'w0 <<= 1' upper bits of $rax are random. That is
+ * expected and correct. Trim them.
*/
ret = (__u32) ret;
if (len != 74 || ret != 148)
@@ -120,8 +120,6 @@ int new_get_skb_ifindex(int val, struct __sk_buff *skb, int var)
void *data = (void *)(long)skb->data;
struct ipv6hdr ip6, *ip6p;
int ifindex = skb->ifindex;
- __u32 eth_proto;
- __u32 nh_off;
/* check that BPF extension can read packet via direct packet access */
if (data + 14 + sizeof(ip6) > data_end)
diff --git a/tools/testing/selftests/bpf/progs/fexit_many_args.c b/tools/testing/selftests/bpf/progs/fexit_many_args.c
new file mode 100644
index 000000000000..53b335c2dafb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fexit_many_args.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Tencent */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 test1_result = 0;
+SEC("fexit/bpf_testmod_fentry_test7")
+int BPF_PROG(test1, __u64 a, void *b, short c, int d, void *e, char f,
+ int g, int ret)
+{
+ test1_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
+ e == (void *)20 && f == 21 && g == 22 && ret == 133;
+ return 0;
+}
+
+__u64 test2_result = 0;
+SEC("fexit/bpf_testmod_fentry_test11")
+int BPF_PROG(test2, __u64 a, void *b, short c, int d, void *e, char f,
+ int g, unsigned int h, long i, __u64 j, unsigned long k,
+ int ret)
+{
+ test2_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
+ e == (void *)20 && f == 21 && g == 22 && h == 23 &&
+ i == 24 && j == 25 && k == 26 && ret == 231;
+ return 0;
+}
+
+__u64 test3_result = 0;
+SEC("fexit/bpf_testmod_fentry_test11")
+int BPF_PROG(test3, __u64 a, __u64 b, __u64 c, __u64 d, __u64 e, __u64 f,
+ __u64 g, __u64 h, __u64 i, __u64 j, __u64 k, __u64 ret)
+{
+ test3_result = a == 16 && b == 17 && c == 18 && d == 19 &&
+ e == 20 && f == 21 && g == 22 && h == 23 &&
+ i == 24 && j == 25 && k == 26 && ret == 231;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/fexit_sleep.c b/tools/testing/selftests/bpf/progs/fexit_sleep.c
new file mode 100644
index 000000000000..106dc75efcc4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fexit_sleep.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char LICENSE[] SEC("license") = "GPL";
+
+int pid = 0;
+int fentry_cnt = 0;
+int fexit_cnt = 0;
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int nanosleep_fentry(void *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ fentry_cnt++;
+ return 0;
+}
+
+SEC("fexit/" SYS_PREFIX "sys_nanosleep")
+int nanosleep_fexit(void *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ fexit_cnt++;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/fexit_test.c b/tools/testing/selftests/bpf/progs/fexit_test.c
index 0952affb22a6..8f1ccb7302e1 100644
--- a/tools/testing/selftests/bpf/progs/fexit_test.c
+++ b/tools/testing/selftests/bpf/progs/fexit_test.c
@@ -65,7 +65,7 @@ __u64 test7_result = 0;
SEC("fexit/bpf_fentry_test7")
int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
{
- if (arg == 0)
+ if (!arg)
test7_result = 1;
return 0;
}
@@ -74,7 +74,7 @@ __u64 test8_result = 0;
SEC("fexit/bpf_fentry_test8")
int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
{
- if (arg->a == 0)
+ if (!arg->a)
test8_result = 1;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/fib_lookup.c b/tools/testing/selftests/bpf/progs/fib_lookup.c
new file mode 100644
index 000000000000..7b5dd2214ff4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fib_lookup.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/types.h>
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+#include <bpf/bpf_helpers.h>
+
+struct bpf_fib_lookup fib_params = {};
+int fib_lookup_ret = 0;
+int lookup_flags = 0;
+
+SEC("tc")
+int fib_lookup(struct __sk_buff *skb)
+{
+ fib_lookup_ret = bpf_fib_lookup(skb, &fib_params, sizeof(fib_params),
+ lookup_flags);
+
+ return TC_ACT_SHOT;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/find_vma.c b/tools/testing/selftests/bpf/progs/find_vma.c
new file mode 100644
index 000000000000..02b82774469c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/find_vma.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct callback_ctx {
+ int dummy;
+};
+
+#define VM_EXEC 0x00000004
+#define DNAME_INLINE_LEN 32
+
+pid_t target_pid = 0;
+char d_iname[DNAME_INLINE_LEN] = {0};
+__u32 found_vm_exec = 0;
+__u64 addr = 0;
+int find_zero_ret = -1;
+int find_addr_ret = -1;
+
+static long check_vma(struct task_struct *task, struct vm_area_struct *vma,
+ struct callback_ctx *data)
+{
+ if (vma->vm_file)
+ bpf_probe_read_kernel_str(d_iname, DNAME_INLINE_LEN - 1,
+ vma->vm_file->f_path.dentry->d_shortname.string);
+
+ /* check for VM_EXEC */
+ if (vma->vm_flags & VM_EXEC)
+ found_vm_exec = 1;
+
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_getpid(void)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct callback_ctx data = {};
+
+ if (task->pid != target_pid)
+ return 0;
+
+ find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0);
+
+ /* this should return -ENOENT */
+ find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0);
+ return 0;
+}
+
+SEC("perf_event")
+int handle_pe(void)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct callback_ctx data = {};
+
+ if (task->pid != target_pid)
+ return 0;
+
+ find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0);
+
+ /* In NMI, this should return -EBUSY, as the previous call is using
+ * the irq_work.
+ */
+ find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/find_vma_fail1.c b/tools/testing/selftests/bpf/progs/find_vma_fail1.c
new file mode 100644
index 000000000000..7ba9a428f228
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/find_vma_fail1.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#define vm_flags vm_start
+
+char _license[] SEC("license") = "GPL";
+
+struct callback_ctx {
+ int dummy;
+};
+
+static long write_vma(struct task_struct *task, struct vm_area_struct *vma,
+ struct callback_ctx *data)
+{
+ /* writing to vma, which is illegal */
+ vma->vm_start = 0xffffffffff600000;
+
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_getpid(void)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct callback_ctx data = {};
+
+ bpf_find_vma(task, 0, write_vma, &data, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/find_vma_fail2.c b/tools/testing/selftests/bpf/progs/find_vma_fail2.c
new file mode 100644
index 000000000000..9bcf3203e26b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/find_vma_fail2.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct callback_ctx {
+ int dummy;
+};
+
+static long write_task(struct task_struct *task, struct vm_area_struct *vma,
+ struct callback_ctx *data)
+{
+ /* writing to task, which is illegal */
+ task->mm = NULL;
+
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_getpid(void)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct callback_ctx data = {};
+
+ bpf_find_vma(task, 0, write_task, &data, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
new file mode 100644
index 000000000000..52f6995ff29c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 3);
+ __type(key, __u32);
+ __type(value, __u64);
+} arraymap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} percpu_map SEC(".maps");
+
+struct callback_ctx {
+ int output;
+};
+
+const volatile int bypass_unused = 1;
+
+static __u64
+unused_subprog(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *data)
+{
+ data->output = 0;
+ return 1;
+}
+
+static __u64
+check_array_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *data)
+{
+ data->output += *val;
+ if (*key == 1)
+ return 1; /* stop the iteration */
+ return 0;
+}
+
+__u32 cpu = 0;
+__u64 percpu_val = 0;
+
+static __u64
+check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *data)
+{
+ cpu = bpf_get_smp_processor_id();
+ percpu_val = *val;
+ return 0;
+}
+
+u32 arraymap_output = 0;
+
+SEC("tc")
+int test_pkt_access(struct __sk_buff *skb)
+{
+ struct callback_ctx data;
+
+ data.output = 0;
+ bpf_for_each_map_elem(&arraymap, check_array_elem, &data, 0);
+ if (!bypass_unused)
+ bpf_for_each_map_elem(&arraymap, unused_subprog, &data, 0);
+ arraymap_output = data.output;
+
+ bpf_for_each_map_elem(&percpu_map, check_percpu_elem, (void *)0, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
new file mode 100644
index 000000000000..276994d5c0c7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 3);
+ __type(key, __u32);
+ __type(value, __u64);
+} hashmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} percpu_map SEC(".maps");
+
+struct callback_ctx {
+ struct __sk_buff *ctx;
+ int input;
+ int output;
+};
+
+static __u64
+check_hash_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *data)
+{
+ struct __sk_buff *skb = data->ctx;
+ __u32 k;
+ __u64 v;
+
+ if (skb) {
+ k = *key;
+ v = *val;
+ if (skb->len == 10000 && k == 10 && v == 10)
+ data->output = 3; /* impossible path */
+ else
+ data->output = 4;
+ } else {
+ data->output = data->input;
+ bpf_map_delete_elem(map, key);
+ }
+
+ return 0;
+}
+
+__u32 cpu = 0;
+__u32 percpu_called = 0;
+__u32 percpu_key = 0;
+__u64 percpu_val = 0;
+int percpu_output = 0;
+
+static __u64
+check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *unused)
+{
+ struct callback_ctx data;
+
+ percpu_called++;
+ cpu = bpf_get_smp_processor_id();
+ percpu_key = *key;
+ percpu_val = *val;
+
+ data.ctx = 0;
+ data.input = 100;
+ data.output = 0;
+ bpf_for_each_map_elem(&hashmap, check_hash_elem, &data, 0);
+ percpu_output = data.output;
+
+ return 0;
+}
+
+int hashmap_output = 0;
+int hashmap_elems = 0;
+int percpu_map_elems = 0;
+
+SEC("tc")
+int test_pkt_access(struct __sk_buff *skb)
+{
+ struct callback_ctx data;
+
+ data.ctx = skb;
+ data.input = 10;
+ data.output = 0;
+ hashmap_elems = bpf_for_each_map_elem(&hashmap, check_hash_elem, &data, 0);
+ hashmap_output = data.output;
+
+ percpu_map_elems = bpf_for_each_map_elem(&percpu_map, check_percpu_elem,
+ (void *)0, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/for_each_hash_modify.c b/tools/testing/selftests/bpf/progs/for_each_hash_modify.c
new file mode 100644
index 000000000000..82307166f789
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/for_each_hash_modify.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Intel Corporation */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 128);
+ __type(key, __u64);
+ __type(value, __u64);
+} hashmap SEC(".maps");
+
+static int cb(struct bpf_map *map, __u64 *key, __u64 *val, void *arg)
+{
+ bpf_map_delete_elem(map, key);
+ bpf_map_update_elem(map, key, val, 0);
+ return 0;
+}
+
+SEC("tc")
+int test_pkt_access(struct __sk_buff *skb)
+{
+ (void)skb;
+
+ bpf_for_each_map_elem(&hashmap, cb, NULL, 0);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c b/tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c
new file mode 100644
index 000000000000..8e545865ea33
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} array_map SEC(".maps");
+
+static __u64
+check_array_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ void *data)
+{
+ bpf_get_current_comm(key, sizeof(*key));
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int test_map_key_write(const void *ctx)
+{
+ bpf_for_each_map_elem(&array_map, check_array_elem, NULL, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/for_each_multi_maps.c b/tools/testing/selftests/bpf/progs/for_each_multi_maps.c
new file mode 100644
index 000000000000..ff0bed7d4459
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/for_each_multi_maps.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 3);
+ __type(key, __u32);
+ __type(value, __u64);
+} arraymap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 5);
+ __type(key, __u32);
+ __type(value, __u64);
+} hashmap SEC(".maps");
+
+struct callback_ctx {
+ int output;
+};
+
+u32 data_output = 0;
+int use_array = 0;
+
+static __u64
+check_map_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *data)
+{
+ data->output += *val;
+ return 0;
+}
+
+SEC("tc")
+int test_pkt_access(struct __sk_buff *skb)
+{
+ struct callback_ctx data;
+
+ data.output = 0;
+ if (use_array)
+ bpf_for_each_map_elem(&arraymap, check_map_elem, &data, 0);
+ else
+ bpf_for_each_map_elem(&hashmap, check_map_elem, &data, 0);
+ data_output = data.output;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/free_timer.c b/tools/testing/selftests/bpf/progs/free_timer.c
new file mode 100644
index 000000000000..4501ae8fc414
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/free_timer.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025. Huawei Technologies Co., Ltd */
+#include <linux/bpf.h>
+#include <time.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#define MAX_ENTRIES 8
+
+struct map_value {
+ struct bpf_timer timer;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, MAX_ENTRIES);
+} map SEC(".maps");
+
+static int timer_cb(void *map, void *key, struct map_value *value)
+{
+ volatile int sum = 0;
+ int i;
+
+ bpf_for(i, 0, 1024 * 1024) sum += i;
+
+ return 0;
+}
+
+static int start_cb(int key)
+{
+ struct map_value *value;
+
+ value = bpf_map_lookup_elem(&map, (void *)&key);
+ if (!value)
+ return 0;
+
+ bpf_timer_init(&value->timer, &map, CLOCK_MONOTONIC);
+ bpf_timer_set_callback(&value->timer, timer_cb);
+	/* Hope 100us will be enough to wake up and run the overwrite thread */
+ bpf_timer_start(&value->timer, 100000, BPF_F_TIMER_CPU_PIN);
+
+ return 0;
+}
+
+static int overwrite_cb(int key)
+{
+ struct map_value zero = {};
+
+ /* Free the timer which may run on other CPU */
+ bpf_map_update_elem(&map, (void *)&key, &zero, BPF_ANY);
+
+ return 0;
+}
+
+SEC("syscall")
+int BPF_PROG(start_timer)
+{
+ bpf_loop(MAX_ENTRIES, start_cb, NULL, 0);
+ return 0;
+}
+
+SEC("syscall")
+int BPF_PROG(overwrite_timer)
+{
+ bpf_loop(MAX_ENTRIES, overwrite_cb, NULL, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/freplace_attach_probe.c b/tools/testing/selftests/bpf/progs/freplace_attach_probe.c
index bb2a77c5b62b..370a0e1922e0 100644
--- a/tools/testing/selftests/bpf/progs/freplace_attach_probe.c
+++ b/tools/testing/selftests/bpf/progs/freplace_attach_probe.c
@@ -23,7 +23,7 @@ struct {
SEC("freplace/handle_kprobe")
int new_handle_kprobe(struct pt_regs *ctx)
{
- struct hmap_elem zero = {}, *val;
+ struct hmap_elem *val;
int key = 0;
val = bpf_map_lookup_elem(&hash_map, &key);
diff --git a/tools/testing/selftests/bpf/progs/freplace_cls_redirect.c b/tools/testing/selftests/bpf/progs/freplace_cls_redirect.c
index 68a5a9db928a..7e94412d47a5 100644
--- a/tools/testing/selftests/bpf/progs/freplace_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/freplace_cls_redirect.c
@@ -7,12 +7,12 @@
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
-struct bpf_map_def SEC("maps") sock_map = {
- .type = BPF_MAP_TYPE_SOCKMAP,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 2,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 2);
+} sock_map SEC(".maps");
SEC("freplace/cls_redirect")
int freplace_cls_redirect_test(struct __sk_buff *skb)
diff --git a/tools/testing/selftests/bpf/progs/freplace_dead_global_func.c b/tools/testing/selftests/bpf/progs/freplace_dead_global_func.c
new file mode 100644
index 000000000000..e6a75f86cac6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_dead_global_func.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("freplace")
+int freplace_prog(void)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/freplace_global_func.c b/tools/testing/selftests/bpf/progs/freplace_global_func.c
new file mode 100644
index 000000000000..96cb61a6ce87
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_global_func.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+__noinline
+int test_ctx_global_func(struct __sk_buff *skb)
+{
+ volatile int retval = 1;
+ return retval;
+}
+
+SEC("freplace/test_pkt_access")
+int new_test_pkt_access(struct __sk_buff *skb)
+{
+ return test_ctx_global_func(skb);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/freplace_progmap.c b/tools/testing/selftests/bpf/progs/freplace_progmap.c
new file mode 100644
index 000000000000..81b56b9aa7d6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_progmap.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CPUMAP);
+ __type(key, __u32);
+ __type(value, struct bpf_cpumap_val);
+ __uint(max_entries, 1);
+} cpu_map SEC(".maps");
+
+SEC("xdp/cpumap")
+int xdp_drop_prog(struct xdp_md *ctx)
+{
+ return XDP_DROP;
+}
+
+SEC("freplace")
+int xdp_cpumap_prog(struct xdp_md *ctx)
+{
+ return bpf_redirect_map(&cpu_map, 0, XDP_PASS);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/freplace_unreliable_prog.c b/tools/testing/selftests/bpf/progs/freplace_unreliable_prog.c
new file mode 100644
index 000000000000..624078abf3de
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_unreliable_prog.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+SEC("freplace/btf_unreliable_kprobe")
+/* The context type is what the BPF verifier expects for a kprobe context, but
+ * the target program has a `struct whatever *ctx` argument, so the freplace
+ * operation will be rejected with the following message:
+ *
+ * arg0 replace_btf_unreliable_kprobe(struct pt_regs *) doesn't match btf_unreliable_kprobe(struct whatever *)
+ */
+int replace_btf_unreliable_kprobe(bpf_user_pt_regs_t *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/get_branch_snapshot.c b/tools/testing/selftests/bpf/progs/get_branch_snapshot.c
new file mode 100644
index 000000000000..511ac634eef0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/get_branch_snapshot.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 test1_hits = 0;
+__u64 address_low = 0;
+__u64 address_high = 0;
+int wasted_entries = 0;
+long total_entries = 0;
+
+#define ENTRY_CNT 32
+struct perf_branch_entry entries[ENTRY_CNT] = {};
+
+static inline bool gbs_in_range(__u64 val)
+{
+ return (val >= address_low) && (val < address_high);
+}
+
+SEC("fexit/bpf_testmod_loop_test")
+int BPF_PROG(test1, int n, int ret)
+{
+ long i;
+
+ total_entries = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
+ total_entries /= sizeof(struct perf_branch_entry);
+
+ for (i = 0; i < ENTRY_CNT; i++) {
+ if (i >= total_entries)
+ break;
+ if (gbs_in_range(entries[i].from) && gbs_in_range(entries[i].to))
+ test1_hits++;
+ else if (!test1_hits)
+ wasted_entries++;
+ }
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
index 6b42db2fe391..30fd504856c7 100644
--- a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
+++ b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
@@ -4,37 +4,18 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 1);
- __type(key, __u32);
- __type(value, __u64);
-} cg_ids SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 1);
- __type(key, __u32);
- __type(value, __u32);
-} pidmap SEC(".maps");
+__u64 cg_id;
+__u64 expected_pid;
SEC("tracepoint/syscalls/sys_enter_nanosleep")
int trace(void *ctx)
{
__u32 pid = bpf_get_current_pid_tgid();
- __u32 key = 0, *expected_pid;
- __u64 *val;
-
- expected_pid = bpf_map_lookup_elem(&pidmap, &key);
- if (!expected_pid || *expected_pid != pid)
- return 0;
- val = bpf_map_lookup_elem(&cg_ids, &key);
- if (val)
- *val = bpf_get_current_cgroup_id();
+ if (expected_pid == pid)
+ cg_id = bpf_get_current_cgroup_id();
return 0;
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/progs/get_func_args_test.c b/tools/testing/selftests/bpf/progs/get_func_args_test.c
new file mode 100644
index 000000000000..e0f34a55e697
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/get_func_args_test.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <errno.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 test1_result = 0;
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test1)
+{
+ __u64 cnt = bpf_get_func_arg_cnt(ctx);
+ __u64 a = 0, z = 0, ret = 0;
+ __s64 err;
+
+ test1_result = cnt == 1;
+
+ /* valid arguments */
+ err = bpf_get_func_arg(ctx, 0, &a);
+
+	/* Accesses to traced function argument values need a proper type cast,
+	 * because the trampoline uses a type-specific instruction to save each
+	 * argument, e.g. for 'int a' a 32-bit mov:
+ *
+ * mov %edi,-0x8(%rbp)
+ *
+ * so the upper 4 bytes are not zeroed.
+ */
+ test1_result &= err == 0 && ((int) a == 1);
+
+ /* not valid argument */
+ err = bpf_get_func_arg(ctx, 1, &z);
+ test1_result &= err == -EINVAL;
+
+ /* return value fails in fentry */
+ err = bpf_get_func_ret(ctx, &ret);
+ test1_result &= err == -EOPNOTSUPP;
+ return 0;
+}
+
+__u64 test2_result = 0;
+SEC("fexit/bpf_fentry_test2")
+int BPF_PROG(test2)
+{
+ __u64 cnt = bpf_get_func_arg_cnt(ctx);
+ __u64 a = 0, b = 0, z = 0, ret = 0;
+ __s64 err;
+
+ test2_result = cnt == 2;
+
+ /* valid arguments */
+ err = bpf_get_func_arg(ctx, 0, &a);
+ test2_result &= err == 0 && (int) a == 2;
+
+ err = bpf_get_func_arg(ctx, 1, &b);
+ test2_result &= err == 0 && b == 3;
+
+ /* not valid argument */
+ err = bpf_get_func_arg(ctx, 2, &z);
+ test2_result &= err == -EINVAL;
+
+ /* return value */
+ err = bpf_get_func_ret(ctx, &ret);
+ test2_result &= err == 0 && ret == 5;
+ return 0;
+}
+
+__u64 test3_result = 0;
+SEC("fmod_ret/bpf_modify_return_test")
+int BPF_PROG(fmod_ret_test, int _a, int *_b, int _ret)
+{
+ __u64 cnt = bpf_get_func_arg_cnt(ctx);
+ __u64 a = 0, b = 0, z = 0, ret = 0;
+ __s64 err;
+
+ test3_result = cnt == 2;
+
+ /* valid arguments */
+ err = bpf_get_func_arg(ctx, 0, &a);
+ test3_result &= err == 0 && ((int) a == 1);
+
+ err = bpf_get_func_arg(ctx, 1, &b);
+ test3_result &= err == 0 && ((int *) b == _b);
+
+ /* not valid argument */
+ err = bpf_get_func_arg(ctx, 2, &z);
+ test3_result &= err == -EINVAL;
+
+ /* return value */
+ err = bpf_get_func_ret(ctx, &ret);
+ test3_result &= err == 0 && ret == 0;
+
+ /* change return value, it's checked in fexit_test program */
+ return 1234;
+}
+
+__u64 test4_result = 0;
+SEC("fexit/bpf_modify_return_test")
+int BPF_PROG(fexit_test, int _a, int *_b, int _ret)
+{
+ __u64 cnt = bpf_get_func_arg_cnt(ctx);
+ __u64 a = 0, b = 0, z = 0, ret = 0;
+ __s64 err;
+
+ test4_result = cnt == 2;
+
+ /* valid arguments */
+ err = bpf_get_func_arg(ctx, 0, &a);
+ test4_result &= err == 0 && ((int) a == 1);
+
+ err = bpf_get_func_arg(ctx, 1, &b);
+ test4_result &= err == 0 && ((int *) b == _b);
+
+ /* not valid argument */
+ err = bpf_get_func_arg(ctx, 2, &z);
+ test4_result &= err == -EINVAL;
+
+ /* return value */
+ err = bpf_get_func_ret(ctx, &ret);
+ test4_result &= err == 0 && ret == 1234;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
new file mode 100644
index 000000000000..2011cacdeb18
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+extern int bpf_fentry_test1(int a) __ksym;
+extern int bpf_modify_return_test(int a, int *b) __ksym;
+
+extern const void bpf_fentry_test2 __ksym;
+extern const void bpf_fentry_test3 __ksym;
+extern const void bpf_fentry_test4 __ksym;
+
+extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
+
+/* This function is here to have CONFIG_X86_KERNEL_IBT
+ * used and added to object BTF.
+ */
+int unused(void)
+{
+ return CONFIG_X86_KERNEL_IBT ? 0 : 1;
+}
+
+__u64 test1_result = 0;
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test1, int a)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test1_result = (const void *) addr == &bpf_fentry_test1;
+ return 0;
+}
+
+__u64 test2_result = 0;
+SEC("fexit/bpf_fentry_test2")
+int BPF_PROG(test2, int a)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test2_result = (const void *) addr == &bpf_fentry_test2;
+ return 0;
+}
+
+__u64 test3_result = 0;
+SEC("kprobe/bpf_fentry_test3")
+int test3(struct pt_regs *ctx)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test3_result = (const void *) addr == &bpf_fentry_test3;
+ return 0;
+}
+
+__u64 test4_result = 0;
+SEC("kretprobe/bpf_fentry_test4")
+int BPF_KRETPROBE(test4)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test4_result = (const void *) addr == &bpf_fentry_test4;
+ return 0;
+}
+
+__u64 test5_result = 0;
+SEC("fmod_ret/bpf_modify_return_test")
+int BPF_PROG(test5, int a, int *b, int ret)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test5_result = (const void *) addr == &bpf_modify_return_test;
+ return ret;
+}
+
+__u64 test6_result = 0;
+SEC("?kprobe")
+int test6(struct pt_regs *ctx)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test6_result = (const void *) addr == 0;
+ return 0;
+}
+
+unsigned long uprobe_trigger;
+
+__u64 test7_result = 0;
+SEC("uprobe//proc/self/exe:uprobe_trigger")
+int BPF_UPROBE(test7)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test7_result = (const void *) addr == (const void *) uprobe_trigger;
+ return 0;
+}
+
+__u64 test8_result = 0;
+SEC("uretprobe//proc/self/exe:uprobe_trigger")
+int BPF_URETPROBE(test8, int ret)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test8_result = (const void *) addr == (const void *) uprobe_trigger;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_uprobe_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_uprobe_test.c
new file mode 100644
index 000000000000..052f8a4345a8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/get_func_ip_uprobe_test.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+unsigned long uprobe_trigger_body;
+
+__u64 test1_result = 0;
+SEC("uprobe//proc/self/exe:uprobe_trigger_body+1")
+int BPF_UPROBE(test1)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test1_result = (const void *) addr == (const void *) uprobe_trigger_body + 1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/getpeername4_prog.c b/tools/testing/selftests/bpf/progs/getpeername4_prog.c
new file mode 100644
index 000000000000..4c97208cd25d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/getpeername4_prog.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+#define REWRITE_ADDRESS_IP4 0xc0a801fe // 192.168.1.254
+#define REWRITE_ADDRESS_PORT4 4040
+
+SEC("cgroup/getpeername4")
+int getpeername_v4_prog(struct bpf_sock_addr *ctx)
+{
+ ctx->user_ip4 = bpf_htonl(REWRITE_ADDRESS_IP4);
+ ctx->user_port = bpf_htons(REWRITE_ADDRESS_PORT4);
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/getpeername6_prog.c b/tools/testing/selftests/bpf/progs/getpeername6_prog.c
new file mode 100644
index 000000000000..070e4d7f636c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/getpeername6_prog.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+#define REWRITE_ADDRESS_IP6_0 0xfaceb00c
+#define REWRITE_ADDRESS_IP6_1 0x12345678
+#define REWRITE_ADDRESS_IP6_2 0x00000000
+#define REWRITE_ADDRESS_IP6_3 0x0000abcd
+
+#define REWRITE_ADDRESS_PORT6 6060
+
+SEC("cgroup/getpeername6")
+int getpeername_v6_prog(struct bpf_sock_addr *ctx)
+{
+ ctx->user_ip6[0] = bpf_htonl(REWRITE_ADDRESS_IP6_0);
+ ctx->user_ip6[1] = bpf_htonl(REWRITE_ADDRESS_IP6_1);
+ ctx->user_ip6[2] = bpf_htonl(REWRITE_ADDRESS_IP6_2);
+ ctx->user_ip6[3] = bpf_htonl(REWRITE_ADDRESS_IP6_3);
+ ctx->user_port = bpf_htons(REWRITE_ADDRESS_PORT6);
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c
new file mode 100644
index 000000000000..5a76754f846b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite";
+
+SEC("cgroup/getpeername_unix")
+int getpeername_unix_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);
+ struct sockaddr_un *sa_kern_unaddr;
+ __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) +
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1;
+ int ret;
+
+ ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1);
+ if (ret)
+ return 1;
+
+ if (sa_kern->uaddrlen != unaddrlen)
+ return 1;
+
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
+ if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
+ return 1;
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/getsockname4_prog.c b/tools/testing/selftests/bpf/progs/getsockname4_prog.c
new file mode 100644
index 000000000000..e298487c6347
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/getsockname4_prog.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+#define REWRITE_ADDRESS_IP4 0xc0a801fe // 192.168.1.254
+#define REWRITE_ADDRESS_PORT4 4040
+
+SEC("cgroup/getsockname4")
+int getsockname_v4_prog(struct bpf_sock_addr *ctx)
+{
+ ctx->user_ip4 = bpf_htonl(REWRITE_ADDRESS_IP4);
+ ctx->user_port = bpf_htons(REWRITE_ADDRESS_PORT4);
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/getsockname6_prog.c b/tools/testing/selftests/bpf/progs/getsockname6_prog.c
new file mode 100644
index 000000000000..811d10cd5525
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/getsockname6_prog.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+#define REWRITE_ADDRESS_IP6_0 0xfaceb00c
+#define REWRITE_ADDRESS_IP6_1 0x12345678
+#define REWRITE_ADDRESS_IP6_2 0x00000000
+#define REWRITE_ADDRESS_IP6_3 0x0000abcd
+
+#define REWRITE_ADDRESS_PORT6 6060
+
+SEC("cgroup/getsockname6")
+int getsockname_v6_prog(struct bpf_sock_addr *ctx)
+{
+ ctx->user_ip6[0] = bpf_htonl(REWRITE_ADDRESS_IP6_0);
+ ctx->user_ip6[1] = bpf_htonl(REWRITE_ADDRESS_IP6_1);
+ ctx->user_ip6[2] = bpf_htonl(REWRITE_ADDRESS_IP6_2);
+ ctx->user_ip6[3] = bpf_htonl(REWRITE_ADDRESS_IP6_3);
+ ctx->user_port = bpf_htons(REWRITE_ADDRESS_PORT6);
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c
new file mode 100644
index 000000000000..7867113c696f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite";
+
+SEC("cgroup/getsockname_unix")
+int getsockname_unix_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);
+ struct sockaddr_un *sa_kern_unaddr;
+ __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) +
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1;
+ int ret;
+
+ ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1);
+ if (ret)
+ return 1;
+
+ if (sa_kern->uaddrlen != unaddrlen)
+ return 1;
+
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
+ if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
+ return 1;
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/htab_mem_bench.c b/tools/testing/selftests/bpf/progs/htab_mem_bench.c
new file mode 100644
index 000000000000..b1b721b14d67
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/htab_mem_bench.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <stdbool.h>
+#include <errno.h>
+#include <linux/types.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define OP_BATCH 64
+
+struct update_ctx {
+ unsigned int from;
+ unsigned int step;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, 4);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} htab SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
+unsigned char zeroed_value[4096];
+unsigned int nr_thread = 0;
+long op_cnt = 0;
+
+static int write_htab(unsigned int i, struct update_ctx *ctx, unsigned int flags)
+{
+ bpf_map_update_elem(&htab, &ctx->from, zeroed_value, flags);
+ ctx->from += ctx->step;
+
+ return 0;
+}
+
+static int overwrite_htab(unsigned int i, struct update_ctx *ctx)
+{
+ return write_htab(i, ctx, 0);
+}
+
+static int newwrite_htab(unsigned int i, struct update_ctx *ctx)
+{
+ return write_htab(i, ctx, BPF_NOEXIST);
+}
+
+static int del_htab(unsigned int i, struct update_ctx *ctx)
+{
+ bpf_map_delete_elem(&htab, &ctx->from);
+ ctx->from += ctx->step;
+
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_getpgid")
+int overwrite(void *ctx)
+{
+ struct update_ctx update;
+
+ update.from = bpf_get_smp_processor_id();
+ update.step = nr_thread;
+ bpf_loop(OP_BATCH, overwrite_htab, &update, 0);
+ __sync_fetch_and_add(&op_cnt, 1);
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_getpgid")
+int batch_add_batch_del(void *ctx)
+{
+ struct update_ctx update;
+
+ update.from = bpf_get_smp_processor_id();
+ update.step = nr_thread;
+ bpf_loop(OP_BATCH, overwrite_htab, &update, 0);
+
+ update.from = bpf_get_smp_processor_id();
+ bpf_loop(OP_BATCH, del_htab, &update, 0);
+
+ __sync_fetch_and_add(&op_cnt, 2);
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_getpgid")
+int add_only(void *ctx)
+{
+ struct update_ctx update;
+
+ update.from = bpf_get_smp_processor_id() / 2;
+ update.step = nr_thread / 2;
+ bpf_loop(OP_BATCH, newwrite_htab, &update, 0);
+ __sync_fetch_and_add(&op_cnt, 1);
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_getppid")
+int del_only(void *ctx)
+{
+ struct update_ctx update;
+
+ update.from = bpf_get_smp_processor_id() / 2;
+ update.step = nr_thread / 2;
+ bpf_loop(OP_BATCH, del_htab, &update, 0);
+ __sync_fetch_and_add(&op_cnt, 1);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/htab_reuse.c b/tools/testing/selftests/bpf/progs/htab_reuse.c
new file mode 100644
index 000000000000..7f7368cb3095
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/htab_reuse.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct htab_val {
+ struct bpf_spin_lock lock;
+ unsigned int data;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 64);
+ __type(key, unsigned int);
+ __type(value, struct htab_val);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} htab SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/htab_update.c b/tools/testing/selftests/bpf/progs/htab_update.c
new file mode 100644
index 000000000000..7481bb30b29b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/htab_update.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022. Huawei Technologies Co., Ltd */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} htab SEC(".maps");
+
+int pid = 0;
+int update_err = 0;
+
+SEC("?fentry/lookup_elem_raw")
+int lookup_elem_raw(void *ctx)
+{
+ __u32 key = 0, value = 1;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != pid)
+ return 0;
+
+ update_err = bpf_map_update_elem(&htab, &key, &value, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/ima.c b/tools/testing/selftests/bpf/progs/ima.c
new file mode 100644
index 000000000000..e16a2c208481
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/ima.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2020 Google LLC.
+ */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+u32 monitored_pid = 0;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 1 << 12);
+} ringbuf SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
+bool use_ima_file_hash;
+bool enable_bprm_creds_for_exec;
+bool enable_kernel_read_file;
+bool test_deny;
+
+static void ima_test_common(struct file *file)
+{
+ u64 ima_hash = 0;
+ u64 *sample;
+ int ret;
+ u32 pid;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid == monitored_pid) {
+ if (!use_ima_file_hash)
+ ret = bpf_ima_inode_hash(file->f_inode, &ima_hash,
+ sizeof(ima_hash));
+ else
+ ret = bpf_ima_file_hash(file, &ima_hash,
+ sizeof(ima_hash));
+ if (ret < 0 || ima_hash == 0)
+ return;
+
+ sample = bpf_ringbuf_reserve(&ringbuf, sizeof(u64), 0);
+ if (!sample)
+ return;
+
+ *sample = ima_hash;
+ bpf_ringbuf_submit(sample, 0);
+ }
+
+ return;
+}
+
+static int ima_test_deny(void)
+{
+ u32 pid;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid == monitored_pid && test_deny)
+ return -EPERM;
+
+ return 0;
+}
+
+SEC("lsm.s/bprm_committed_creds")
+void BPF_PROG(bprm_committed_creds, struct linux_binprm *bprm)
+{
+ ima_test_common(bprm->file);
+}
+
+SEC("lsm.s/bprm_creds_for_exec")
+int BPF_PROG(bprm_creds_for_exec, struct linux_binprm *bprm)
+{
+ if (!enable_bprm_creds_for_exec)
+ return 0;
+
+ ima_test_common(bprm->file);
+ return 0;
+}
+
+SEC("lsm.s/kernel_read_file")
+int BPF_PROG(kernel_read_file, struct file *file, enum kernel_read_file_id id,
+ bool contents)
+{
+ int ret;
+
+ if (!enable_kernel_read_file)
+ return 0;
+
+ if (!contents)
+ return 0;
+
+ if (id != READING_POLICY)
+ return 0;
+
+ ret = ima_test_deny();
+ if (ret < 0)
+ return ret;
+
+ ima_test_common(file);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/inner_array_lookup.c b/tools/testing/selftests/bpf/progs/inner_array_lookup.c
new file mode 100644
index 000000000000..c2c8f2fa451d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/inner_array_lookup.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct inner_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 5);
+ __type(key, int);
+ __type(value, int);
+} inner_map1 SEC(".maps");
+
+struct outer_map {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 3);
+ __type(key, int);
+ __array(values, struct inner_map);
+} outer_map1 SEC(".maps") = {
+ .values = {
+ [2] = &inner_map1,
+ },
+};
+
+SEC("raw_tp/sys_enter")
+int handle__sys_enter(void *ctx)
+{
+ int outer_key = 2, inner_key = 3;
+ int *val;
+ void *map;
+
+ map = bpf_map_lookup_elem(&outer_map1, &outer_key);
+ if (!map)
+ return 1;
+
+ val = bpf_map_lookup_elem(map, &inner_key);
+ if (!val)
+ return 1;
+
+ if (*val == 1)
+ *val = 2;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/ip_check_defrag.c b/tools/testing/selftests/bpf/progs/ip_check_defrag.c
new file mode 100644
index 000000000000..645b2c9f7867
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/ip_check_defrag.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_tracing_net.h"
+
+#define NF_DROP 0
+#define NF_ACCEPT 1
+#define ETH_P_IP 0x0800
+#define ETH_P_IPV6 0x86DD
+#define IP_MF 0x2000
+#define IP_OFFSET 0x1FFF
+#define NEXTHDR_FRAGMENT 44
+
+extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
+ struct bpf_dynptr *ptr__uninit) __ksym;
+extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset,
+ void *buffer, uint32_t buffer__sz) __ksym;
+
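+/* Number of IP fragments dropped ("shot down") by the netfilter hook below */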
+volatile int shootdowns = 0;
+
+static bool is_frag_v4(struct iphdr *iph)
+{
+ int offset;
+ int flags;
+
+ offset = bpf_ntohs(iph->frag_off);
+ flags = offset & ~IP_OFFSET;
+ offset &= IP_OFFSET;
+ offset <<= 3;
+
+ return (flags & IP_MF) || offset;
+}
+
+static bool is_frag_v6(struct ipv6hdr *ip6h)
+{
+ /* Simplifying assumption that there are no extension headers
+ * between fixed header and fragmentation header. This assumption
+ * is only valid in this test case. It saves us the hassle of
+ * searching all potential extension headers.
+ */
+ return ip6h->nexthdr == NEXTHDR_FRAGMENT;
+}
+
+static int handle_v4(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ u8 iph_buf[20] = {};
+ struct iphdr *iph;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr))
+ return NF_DROP;
+
+ iph = bpf_dynptr_slice(&ptr, 0, iph_buf, sizeof(iph_buf));
+ if (!iph)
+ return NF_DROP;
+
+ /* Shootdown any frags */
+ if (is_frag_v4(iph)) {
+ shootdowns++;
+ return NF_DROP;
+ }
+
+ return NF_ACCEPT;
+}
+
+static int handle_v6(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ struct ipv6hdr *ip6h;
+ u8 ip6h_buf[40] = {};
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr))
+ return NF_DROP;
+
+ ip6h = bpf_dynptr_slice(&ptr, 0, ip6h_buf, sizeof(ip6h_buf));
+ if (!ip6h)
+ return NF_DROP;
+
+ /* Shootdown any frags */
+ if (is_frag_v6(ip6h)) {
+ shootdowns++;
+ return NF_DROP;
+ }
+
+ return NF_ACCEPT;
+}
+
+SEC("netfilter")
+int defrag(struct bpf_nf_ctx *ctx)
+{
+ struct __sk_buff *skb = (struct __sk_buff *)ctx->skb;
+
+ switch (bpf_ntohs(ctx->skb->protocol)) {
+ case ETH_P_IP:
+ return handle_v4(skb);
+ case ETH_P_IPV6:
+ return handle_v6(skb);
+ default:
+ return NF_ACCEPT;
+ }
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/irq.c b/tools/testing/selftests/bpf/progs/irq.c
new file mode 100644
index 000000000000..74d912b22de9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/irq.c
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+unsigned long global_flags;
+
+extern void bpf_local_irq_save(unsigned long *) __weak __ksym;
+extern void bpf_local_irq_restore(unsigned long *) __weak __ksym;
+extern int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void *unsafe_ptr__ign, u64 flags) __weak __ksym;
+
+struct bpf_res_spin_lock lockA __hidden SEC(".data.A");
+struct bpf_res_spin_lock lockB __hidden SEC(".data.B");
+
+SEC("?tc")
+__failure __msg("arg#0 doesn't point to an irq flag on stack")
+int irq_save_bad_arg(struct __sk_buff *ctx)
+{
+ bpf_local_irq_save(&global_flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("arg#0 doesn't point to an irq flag on stack")
+int irq_restore_bad_arg(struct __sk_buff *ctx)
+{
+ bpf_local_irq_restore(&global_flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_2(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags2);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_3(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags2);
+ bpf_local_irq_save(&flags3);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_3_minus_2(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags2);
+ bpf_local_irq_save(&flags3);
+ bpf_local_irq_restore(&flags3);
+ bpf_local_irq_restore(&flags2);
+ return 0;
+}
+
+static __noinline void local_irq_save(unsigned long *flags)
+{
+ bpf_local_irq_save(flags);
+}
+
+static __noinline void local_irq_restore(unsigned long *flags)
+{
+ bpf_local_irq_restore(flags);
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_1_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags;
+
+ local_irq_save(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_2_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+
+ local_irq_save(&flags1);
+ local_irq_save(&flags2);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_3_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save(&flags1);
+ local_irq_save(&flags2);
+ local_irq_save(&flags3);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_3_minus_2_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save(&flags1);
+ local_irq_save(&flags2);
+ local_irq_save(&flags3);
+ local_irq_restore(&flags3);
+ local_irq_restore(&flags2);
+ return 0;
+}
+
+SEC("?tc")
+__success
+int irq_balance(struct __sk_buff *ctx)
+{
+ unsigned long flags;
+
+ local_irq_save(&flags);
+ local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__success
+int irq_balance_n(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save(&flags1);
+ local_irq_save(&flags2);
+ local_irq_save(&flags3);
+ local_irq_restore(&flags3);
+ local_irq_restore(&flags2);
+ local_irq_restore(&flags1);
+ return 0;
+}
+
+static __noinline void local_irq_balance(void)
+{
+ unsigned long flags;
+
+ local_irq_save(&flags);
+ local_irq_restore(&flags);
+}
+
+static __noinline void local_irq_balance_n(void)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save(&flags1);
+ local_irq_save(&flags2);
+ local_irq_save(&flags3);
+ local_irq_restore(&flags3);
+ local_irq_restore(&flags2);
+ local_irq_restore(&flags1);
+}
+
+SEC("?tc")
+__success
+int irq_balance_subprog(struct __sk_buff *ctx)
+{
+ local_irq_balance();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("sleepable helper bpf_copy_from_user#")
+int irq_sleepable_helper(void *ctx)
+{
+ unsigned long flags;
+ u32 data;
+
+ local_irq_save(&flags);
+ bpf_copy_from_user(&data, sizeof(data), NULL);
+ local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("kernel func bpf_copy_from_user_str is sleepable within IRQ-disabled region")
+int irq_sleepable_kfunc(void *ctx)
+{
+ unsigned long flags;
+ u32 data;
+
+ local_irq_save(&flags);
+ bpf_copy_from_user_str(&data, sizeof(data), NULL, 0);
+ local_irq_restore(&flags);
+ return 0;
+}
+
+int __noinline global_local_irq_balance(void)
+{
+ local_irq_balance_n();
+ return 0;
+}
+
+SEC("?tc")
+__success
+int irq_global_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags;
+
+ bpf_local_irq_save(&flags);
+ global_local_irq_balance();
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_restore_ooo(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags2);
+ bpf_local_irq_restore(&flags1);
+ bpf_local_irq_restore(&flags2);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_restore_ooo_3(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags2);
+ bpf_local_irq_restore(&flags2);
+ bpf_local_irq_save(&flags3);
+ bpf_local_irq_restore(&flags1);
+ bpf_local_irq_restore(&flags3);
+ return 0;
+}
+
+static __noinline void local_irq_save_3(unsigned long *flags1, unsigned long *flags2,
+ unsigned long *flags3)
+{
+ local_irq_save(flags1);
+ local_irq_save(flags2);
+ local_irq_save(flags3);
+}
+
+SEC("?tc")
+__success
+int irq_restore_3_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save_3(&flags1, &flags2, &flags3);
+ bpf_local_irq_restore(&flags3);
+ bpf_local_irq_restore(&flags2);
+ bpf_local_irq_restore(&flags1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_restore_4_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+ unsigned long flags4;
+
+ local_irq_save_3(&flags1, &flags2, &flags3);
+ bpf_local_irq_restore(&flags3);
+ bpf_local_irq_save(&flags4);
+ bpf_local_irq_restore(&flags4);
+ bpf_local_irq_restore(&flags1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_restore_ooo_3_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save_3(&flags1, &flags2, &flags3);
+ bpf_local_irq_restore(&flags3);
+ bpf_local_irq_restore(&flags2);
+ bpf_local_irq_save(&flags3);
+ bpf_local_irq_restore(&flags1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("expected an initialized")
+int irq_restore_invalid(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags = 0xfaceb00c;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("expected uninitialized")
+int irq_save_invalid(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("expected an initialized")
+int irq_restore_iter(struct __sk_buff *ctx)
+{
+ struct bpf_iter_num it;
+
+ bpf_iter_num_new(&it, 0, 42);
+ bpf_local_irq_restore((unsigned long *)&it);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference id=1")
+int irq_save_iter(struct __sk_buff *ctx)
+{
+ struct bpf_iter_num it;
+
+	/* Ensure a same-sized slot has st->ref_obj_id set, so we reject based on
+ * slot_type != STACK_IRQ_FLAG...
+ */
+ _Static_assert(sizeof(it) == sizeof(unsigned long), "broken iterator size");
+
+ bpf_iter_num_new(&it, 0, 42);
+ bpf_local_irq_save((unsigned long *)&it);
+ bpf_local_irq_restore((unsigned long *)&it);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("expected an initialized")
+int irq_flag_overwrite(struct __sk_buff *ctx)
+{
+ unsigned long flags;
+
+ bpf_local_irq_save(&flags);
+ flags = 0xdeadbeef;
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("expected an initialized")
+int irq_flag_overwrite_partial(struct __sk_buff *ctx)
+{
+ unsigned long flags;
+
+ bpf_local_irq_save(&flags);
+ *(((char *)&flags) + 1) = 0xff;
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_ooo_refs_array(struct __sk_buff *ctx)
+{
+ unsigned long flags[4];
+ struct { int i; } *p;
+
+ /* refs=1 */
+ bpf_local_irq_save(&flags[0]);
+
+ /* refs=1,2 */
+ p = bpf_obj_new(typeof(*p));
+ if (!p) {
+ bpf_local_irq_restore(&flags[0]);
+ return 0;
+ }
+
+ /* refs=1,2,3 */
+ bpf_local_irq_save(&flags[1]);
+
+ /* refs=1,2,3,4 */
+ bpf_local_irq_save(&flags[2]);
+
+ /* Now when we remove ref=2, the verifier must not break the ordering in
+ * the refs array between 1,3,4. With an older implementation, the
+ * verifier would swap the last element with the removed element, but to
+ * maintain the stack property we need to use memmove.
+ */
+ bpf_obj_drop(p);
+
+ /* Save and restore to reset active_irq_id to 3, as the ordering is now
+ * refs=1,4,3. When restoring the linear scan will find prev_id in order
+ * as 3 instead of 4.
+ */
+ bpf_local_irq_save(&flags[3]);
+ bpf_local_irq_restore(&flags[3]);
+
+ /* With the incorrect implementation, we can release flags[1], flags[2],
+ * and flags[0], i.e. in the wrong order.
+ */
+ bpf_local_irq_restore(&flags[1]);
+ bpf_local_irq_restore(&flags[2]);
+ bpf_local_irq_restore(&flags[0]);
+ return 0;
+}
+
+int __noinline
+global_subprog(int i)
+{
+ if (i)
+ bpf_printk("%p", &i);
+ return i;
+}
+
+int __noinline
+global_sleepable_helper_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user(&i, sizeof(i), NULL);
+ return i;
+}
+
+int __noinline
+global_sleepable_kfunc_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user_str(&i, sizeof(i), NULL, 0);
+ global_subprog(i);
+ return i;
+}
+
+int __noinline
+global_subprog_calling_sleepable_global(int i)
+{
+ if (!i)
+ global_sleepable_kfunc_subprog(i);
+ return i;
+}
+
+SEC("?syscall")
+__success
+int irq_non_sleepable_global_subprog(void *ctx)
+{
+ unsigned long flags;
+
+ bpf_local_irq_save(&flags);
+ global_subprog(0);
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?syscall")
+__failure __msg("global functions that may sleep are not allowed in non-sleepable context")
+int irq_sleepable_helper_global_subprog(void *ctx)
+{
+ unsigned long flags;
+
+ bpf_local_irq_save(&flags);
+ global_sleepable_helper_subprog(0);
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?syscall")
+__failure __msg("global functions that may sleep are not allowed in non-sleepable context")
+int irq_sleepable_global_subprog_indirect(void *ctx)
+{
+ unsigned long flags;
+
+ bpf_local_irq_save(&flags);
+ global_subprog_calling_sleepable_global(0);
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_ooo_lock_cond_inv(struct __sk_buff *ctx)
+{
+ unsigned long flags1, flags2;
+
+ if (bpf_res_spin_lock_irqsave(&lockA, &flags1))
+ return 0;
+ if (bpf_res_spin_lock_irqsave(&lockB, &flags2)) {
+ bpf_res_spin_unlock_irqrestore(&lockA, &flags1);
+ return 0;
+ }
+
+ bpf_res_spin_unlock_irqrestore(&lockB, &flags1);
+ bpf_res_spin_unlock_irqrestore(&lockA, &flags2);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed")
+int irq_wrong_kfunc_class_1(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+
+ if (bpf_res_spin_lock_irqsave(&lockA, &flags1))
+ return 0;
+	/* For now, bpf_local_irq_restore is not allowed in a critical section,
+	 * but this test ensures the error will be caught via kfunc_class once
+	 * it is opened up. Verified by temporarily permitting this kfunc in a
+	 * critical section.
+ */
+ bpf_local_irq_restore(&flags1);
+ bpf_res_spin_unlock_irqrestore(&lockA, &flags1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed")
+int irq_wrong_kfunc_class_2(struct __sk_buff *ctx)
+{
+ unsigned long flags1, flags2;
+
+ bpf_local_irq_save(&flags1);
+ if (bpf_res_spin_lock_irqsave(&lockA, &flags2))
+ return 0;
+ bpf_local_irq_restore(&flags2);
+ bpf_res_spin_unlock_irqrestore(&lockA, &flags1);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
new file mode 100644
index 000000000000..7dd92a303bf6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -0,0 +1,1929 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_compiler.h"
+
+static volatile int zero = 0;
+
+int my_pid;
+int arr[256];
+int small_arr[16] SEC(".data.small_arr");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 10);
+ __type(key, int);
+ __type(value, int);
+} amap SEC(".maps");
+
+#ifdef REAL_TEST
+#define MY_PID_GUARD() if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0
+#else
+#define MY_PID_GUARD() ({ })
+#endif
+
+SEC("?raw_tp")
+__failure __msg("math between map_value pointer and register with unbounded min value is not allowed")
+int iter_err_unsafe_c_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, i = zero; /* obscure initial value of i */
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 1000);
+ while ((v = bpf_iter_num_next(&it))) {
+ i++;
+ }
+ bpf_iter_num_destroy(&it);
+
+ small_arr[i] = 123; /* invalid */
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("unbounded memory access")
+int iter_err_unsafe_asm_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+
+ MY_PID_GUARD();
+
+ asm volatile (
+ "r6 = %[zero];" /* iteration counter */
+ "r1 = %[it];" /* iterator state */
+ "r2 = 0;"
+ "r3 = 1000;"
+ "r4 = 1;"
+ "call %[bpf_iter_num_new];"
+ "loop:"
+ "r1 = %[it];"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto out;"
+ "r6 += 1;"
+ "goto loop;"
+ "out:"
+ "r1 = %[it];"
+ "call %[bpf_iter_num_destroy];"
+ "r1 = %[small_arr];"
+ "r2 = r6;"
+ "r2 <<= 2;"
+ "r1 += r2;"
+ "*(u32 *)(r1 + 0) = r6;" /* invalid */
+ :
+ : [it]"r"(&it),
+ [small_arr]"r"(small_arr),
+ [zero]"r"(zero),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_common, "r6"
+ );
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_while_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 3);
+ while ((v = bpf_iter_num_next(&it))) {
+ bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
+ }
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_while_loop_auto_cleanup(const void *ctx)
+{
+ __attribute__((cleanup(bpf_iter_num_destroy))) struct bpf_iter_num it;
+ int *v;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 3);
+ while ((v = bpf_iter_num_next(&it))) {
+ bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
+ }
+ /* (!) no explicit bpf_iter_num_destroy() */
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_for_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 5, 10);
+ for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) {
+ bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
+ }
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_bpf_for_each_macro(const void *ctx)
+{
+ int *v;
+
+ MY_PID_GUARD();
+
+ bpf_for_each(num, v, 5, 10) {
+ bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
+ }
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_bpf_for_macro(const void *ctx)
+{
+ int i;
+
+ MY_PID_GUARD();
+
+ bpf_for(i, 5, 10) {
+ bpf_printk("ITER_BASIC: E2 VAL: v=%d", i);
+ }
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_pragma_unroll_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, i;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 2);
+ __pragma_loop_no_unroll
+ for (i = 0; i < 3; i++) {
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
+ }
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_manual_unroll_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 100, 200);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1);
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_multiple_sequential_loops(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, i;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 3);
+ while ((v = bpf_iter_num_next(&it))) {
+ bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
+ }
+ bpf_iter_num_destroy(&it);
+
+ bpf_iter_num_new(&it, 5, 10);
+ for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) {
+ bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
+ }
+ bpf_iter_num_destroy(&it);
+
+ bpf_iter_num_new(&it, 0, 2);
+ __pragma_loop_no_unroll
+ for (i = 0; i < 3; i++) {
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
+ }
+ bpf_iter_num_destroy(&it);
+
+ bpf_iter_num_new(&it, 100, 200);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1);
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_limit_cond_break_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, i = 0, sum = 0;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 10);
+ while ((v = bpf_iter_num_next(&it))) {
+ bpf_printk("ITER_SIMPLE: i=%d v=%d", i, *v);
+ sum += *v;
+
+ i++;
+ if (i > 3)
+ break;
+ }
+ bpf_iter_num_destroy(&it);
+
+ bpf_printk("ITER_SIMPLE: sum=%d\n", sum);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_obfuscate_counter(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, sum = 0;
+	/* Make i's initial value unknowable to the verifier to prevent it from
+	 * pruning the if/else branch inside the loop body and marking i as precise.
+ */
+ int i = zero;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 10);
+ while ((v = bpf_iter_num_next(&it))) {
+ int x;
+
+ i += 1;
+
+ /* If we initialized i as `int i = 0;` above, verifier would
+ * track that i becomes 1 on first iteration after increment
+ * above, and here verifier would eagerly prune else branch
+ * and mark i as precise, ruining open-coded iterator logic
+ * completely, as each next iteration would have a different
+ * *precise* value of i, and thus there would be no
+ * convergence of state. This would result in reaching maximum
+ * instruction limit, no matter what the limit is.
+ */
+ if (i == 1)
+ x = 123;
+ else
+ x = i * 3 + 1;
+
+ bpf_printk("ITER_OBFUSCATE_COUNTER: i=%d v=%d x=%d", i, *v, x);
+
+ sum += x;
+ }
+ bpf_iter_num_destroy(&it);
+
+ bpf_printk("ITER_OBFUSCATE_COUNTER: sum=%d\n", sum);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_search_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, *elem = NULL;
+ bool found = false;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 10);
+
+ while ((v = bpf_iter_num_next(&it))) {
+ bpf_printk("ITER_SEARCH_LOOP: v=%d", *v);
+
+ if (*v == 2) {
+ found = true;
+ elem = v;
+ barrier_var(elem);
+ }
+ }
+
+ /* should fail to verify if bpf_iter_num_destroy() is here */
+
+ if (found)
+		/* the found element's value will be wrong here; we should have
+		 * copied it to a variable, but we want to make sure we can
+		 * still access the memory after the loop anyway
+ */
+ bpf_printk("ITER_SEARCH_LOOP: FOUND IT = %d!\n", *elem);
+ else
+ bpf_printk("ITER_SEARCH_LOOP: NOT FOUND IT!\n");
+
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_array_fill(const void *ctx)
+{
+ int sum, i;
+
+ MY_PID_GUARD();
+
+ bpf_for(i, 0, ARRAY_SIZE(arr)) {
+ arr[i] = i * 2;
+ }
+
+ sum = 0;
+ bpf_for(i, 0, ARRAY_SIZE(arr)) {
+ sum += arr[i];
+ }
+
+ bpf_printk("ITER_ARRAY_FILL: sum=%d (should be %d)\n", sum, 255 * 256);
+
+ return 0;
+}
+
+static int arr2d[4][5];
+static int arr2d_row_sums[4];
+static int arr2d_col_sums[5];
+
+SEC("raw_tp")
+__success
+int iter_nested_iters(const void *ctx)
+{
+ int sum, row, col;
+
+ MY_PID_GUARD();
+
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+		bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ arr2d[row][col] = row * col;
+ }
+ }
+
+ /* zero-initialize sums */
+ sum = 0;
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+ arr2d_row_sums[row] = 0;
+ }
+ bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ arr2d_col_sums[col] = 0;
+ }
+
+ /* calculate sums */
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+ bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ sum += arr2d[row][col];
+ arr2d_row_sums[row] += arr2d[row][col];
+ arr2d_col_sums[col] += arr2d[row][col];
+ }
+ }
+
+ bpf_printk("ITER_NESTED_ITERS: total sum=%d", sum);
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+ bpf_printk("ITER_NESTED_ITERS: row #%d sum=%d", row, arr2d_row_sums[row]);
+ }
+ bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ bpf_printk("ITER_NESTED_ITERS: col #%d sum=%d%s",
+ col, arr2d_col_sums[col],
+ col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : "");
+ }
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_nested_deeply_iters(const void *ctx)
+{
+ int sum = 0;
+
+ MY_PID_GUARD();
+
+ bpf_repeat(10) {
+ bpf_repeat(10) {
+ bpf_repeat(10) {
+ bpf_repeat(10) {
+ bpf_repeat(10) {
+ sum += 1;
+ }
+ }
+ }
+ }
+ /* validate that we can break from inside bpf_repeat() */
+ break;
+ }
+
+ return sum;
+}
+
+static __noinline void fill_inner_dimension(int row)
+{
+ int col;
+
+ bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ arr2d[row][col] = row * col;
+ }
+}
+
+static __noinline int sum_inner_dimension(int row)
+{
+ int sum = 0, col;
+
+ bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ sum += arr2d[row][col];
+ arr2d_row_sums[row] += arr2d[row][col];
+ arr2d_col_sums[col] += arr2d[row][col];
+ }
+
+ return sum;
+}
+
+SEC("raw_tp")
+__success
+int iter_subprog_iters(const void *ctx)
+{
+ int sum, row, col;
+
+ MY_PID_GUARD();
+
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+ fill_inner_dimension(row);
+ }
+
+ /* zero-initialize sums */
+ sum = 0;
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+ arr2d_row_sums[row] = 0;
+ }
+ bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ arr2d_col_sums[col] = 0;
+ }
+
+ /* calculate sums */
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+ sum += sum_inner_dimension(row);
+ }
+
+ bpf_printk("ITER_SUBPROG_ITERS: total sum=%d", sum);
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+ bpf_printk("ITER_SUBPROG_ITERS: row #%d sum=%d",
+ row, arr2d_row_sums[row]);
+ }
+ bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ bpf_printk("ITER_SUBPROG_ITERS: col #%d sum=%d%s",
+ col, arr2d_col_sums[col],
+ col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : "");
+ }
+
+ return 0;
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1000);
+} hash_map SEC(".maps");
+
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int iter_err_too_permissive1(const void *ctx)
+{
+ int *map_val = NULL;
+ int key = 0;
+
+ MY_PID_GUARD();
+
+ map_val = bpf_map_lookup_elem(&hash_map, &key);
+ if (!map_val)
+ return 0;
+
+ bpf_repeat(1000000) {
+ map_val = NULL;
+ }
+
+ *map_val = 123;
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'map_value_or_null'")
+int iter_err_too_permissive2(const void *ctx)
+{
+ int *map_val = NULL;
+ int key = 0;
+
+ MY_PID_GUARD();
+
+ map_val = bpf_map_lookup_elem(&hash_map, &key);
+ if (!map_val)
+ return 0;
+
+ bpf_repeat(1000000) {
+ map_val = bpf_map_lookup_elem(&hash_map, &key);
+ }
+
+ *map_val = 123;
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'map_value_or_null'")
+int iter_err_too_permissive3(const void *ctx)
+{
+ int *map_val = NULL;
+ int key = 0;
+ bool found = false;
+
+ MY_PID_GUARD();
+
+ bpf_repeat(1000000) {
+ map_val = bpf_map_lookup_elem(&hash_map, &key);
+ found = true;
+ }
+
+ if (found)
+ *map_val = 123;
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_tricky_but_fine(const void *ctx)
+{
+ int *map_val = NULL;
+ int key = 0;
+ bool found = false;
+
+ MY_PID_GUARD();
+
+ bpf_repeat(1000000) {
+ map_val = bpf_map_lookup_elem(&hash_map, &key);
+ if (map_val) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ *map_val = 123;
+
+ return 0;
+}
+
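+/* bpf_probe_read_kernel() from a NULL address fails and zero-fills the
+ * destination, so this zeroes memory without the verifier tracking the
+ * contents as known zeros.
+ */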
+#define __bpf_memzero(p, sz) bpf_probe_read_kernel((p), (sz), 0)
+
+SEC("raw_tp")
+__success
+int iter_stack_array_loop(const void *ctx)
+{
+ long arr1[16], arr2[16], sum = 0;
+ int i;
+
+ MY_PID_GUARD();
+
+	/* zero-init arr1 and arr2 in such a way that the verifier doesn't know
+	 * they are all zeros; if we don't do that, the BPF verifier will track
+	 * all combinations of zero/non-zero stack slots for arr1/arr2, which
+	 * will lead to O(2^(ARRAY_SIZE(arr1)+ARRAY_SIZE(arr2))) different
+	 * states
+ */
+ __bpf_memzero(arr1, sizeof(arr1));
+ __bpf_memzero(arr2, sizeof(arr1));
+
+ /* validate that we can break and continue when using bpf_for() */
+ bpf_for(i, 0, ARRAY_SIZE(arr1)) {
+ if (i & 1) {
+ arr1[i] = i;
+ continue;
+ } else {
+ arr2[i] = i;
+ break;
+ }
+ }
+
+ bpf_for(i, 0, ARRAY_SIZE(arr1)) {
+ sum += arr1[i] + arr2[i];
+ }
+
+ return sum;
+}
+
+static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul)
+{
+ int *t, i;
+
+ while ((t = bpf_iter_num_next(it))) {
+ i = *t;
+ if (i >= n)
+ break;
+ arr[i] = i * mul;
+ }
+}
+
+static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n)
+{
+ int *t, i, sum = 0;
+
+ while ((t = bpf_iter_num_next(it))) {
+ i = *t;
+ if ((__u32)i >= n)
+ break;
+ sum += arr[i];
+ }
+
+ return sum;
+}
+
+SEC("raw_tp")
+__success
+int iter_pass_iter_ptr_to_subprog(const void *ctx)
+{
+ int arr1[16], arr2[32];
+ struct bpf_iter_num it;
+ int n, sum1, sum2;
+
+ MY_PID_GUARD();
+
+ /* fill arr1 */
+ n = ARRAY_SIZE(arr1);
+ bpf_iter_num_new(&it, 0, n);
+ fill(&it, arr1, n, 2);
+ bpf_iter_num_destroy(&it);
+
+ /* fill arr2 */
+ n = ARRAY_SIZE(arr2);
+ bpf_iter_num_new(&it, 0, n);
+ fill(&it, arr2, n, 10);
+ bpf_iter_num_destroy(&it);
+
+ /* sum arr1 */
+ n = ARRAY_SIZE(arr1);
+ bpf_iter_num_new(&it, 0, n);
+ sum1 = sum(&it, arr1, n);
+ bpf_iter_num_destroy(&it);
+
+ /* sum arr2 */
+ n = ARRAY_SIZE(arr2);
+ bpf_iter_num_new(&it, 0, n);
+ sum2 = sum(&it, arr2, n);
+ bpf_iter_num_destroy(&it);
+
+ bpf_printk("sum1=%d, sum2=%d", sum1, sum2);
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure
+__msg("R1 type=scalar expected=fp")
+__naked int delayed_read_mark(void)
+{
+ /* This is equivalent to C program below.
+ * The call to bpf_iter_num_next() is reachable with r7 values &fp[-16] and 0xdead.
+ * State with r7=&fp[-16] is visited first and follows r6 != 42 ... continue branch.
+ * At this point iterator next() call is reached with r7 that has no read mark.
+ * Loop body with r7=0xdead would only be visited if verifier would decide to continue
+ * with second loop iteration. Absence of read mark on r7 might affect state
+ * equivalence logic used for iterator convergence tracking.
+ *
+ * r7 = &fp[-16]
+ * fp[-16] = 0
+ * r6 = bpf_get_prandom_u32()
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * r6++
+ * if (r6 != 42) {
+ * r7 = 0xdead
+ * continue;
+ * }
+ * bpf_probe_read_user(r7, 8, 0xdeadbeef); // this is not safe
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return 0
+ */
+ asm volatile (
+ "r7 = r10;"
+ "r7 += -16;"
+ "r0 = 0;"
+ "*(u64 *)(r7 + 0) = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "1:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto 2f;"
+ "r6 += 1;"
+ "if r6 != 42 goto 3f;"
+ "r7 = 0xdead;"
+ "goto 1b;"
+ "3:"
+ "r1 = r7;"
+ "r2 = 8;"
+ "r3 = 0xdeadbeef;"
+ "call %[bpf_probe_read_user];"
+ "goto 1b;"
+ "2:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_probe_read_user)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__failure
+__msg("math between fp pointer and register with unbounded")
+__naked int delayed_precision_mark(void)
+{
+ /* This is equivalent to C program below.
+ * The test is similar to delayed_read_mark but verifies that incomplete
+ * precision marks don't fool the verifier.
+ * The call to bpf_iter_num_next() is reachable with r7 values -16 and -32.
+ * State with r7=-16 is visited first and follows r6 != 42 ... continue branch.
+ * At this point iterator next() call is reached with r7 that has no read
+ * and precision marks.
+ * Loop body with r7=-32 would only be visited if verifier would decide to continue
+ * with second loop iteration. Absence of precision mark on r7 might affect state
+ * equivalence logic used for iterator convergence tracking.
+ *
+ * r8 = 0
+ * fp[-16] = 0
+ * r7 = -16
+ * r6 = bpf_get_prandom_u32()
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * if (r6 != 42) {
+ * r7 = -32
+ * r6 = bpf_get_prandom_u32()
+ * continue;
+ * }
+ * r0 = r10
+ * r0 += r7
+ * r8 = *(u64 *)(r0 + 0) // this is not safe
+ * r6 = bpf_get_prandom_u32()
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return r8
+ */
+ asm volatile (
+ "r8 = 0;"
+ "*(u64 *)(r10 - 16) = r8;"
+ "r7 = -16;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "1:"
+ "r1 = r10;"
+ "r1 += -8;\n"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto 2f;"
+ "if r6 != 42 goto 3f;"
+ "r7 = -33;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "goto 1b;\n"
+ "3:"
+ "r0 = r10;"
+ "r0 += r7;"
+ "r8 = *(u64 *)(r0 + 0);"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "goto 1b;\n"
+ "2:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = r8;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_probe_read_user)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__failure
+__msg("math between fp pointer and register with unbounded")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked int loop_state_deps1(void)
+{
+ /* This is equivalent to C program below.
+ *
+ * The case turns out to be tricky in a sense that:
+ * - states with c=-25 are explored only on a second iteration
+ * of the outer loop;
+ * - states with read+precise mark on c are explored only on
+ * second iteration of the inner loop and in a state which
+ * is pushed to states stack first.
+ *
+ * Depending on the details of iterator convergence logic
+ * verifier might stop states traversal too early and miss
+ * unsafe c=-25 memory access.
+ *
+ * j = iter_new(); // fp[-16]
+ * a = 0; // r6
+ * b = 0; // r7
+ * c = -24; // r8
+ * while (iter_next(j)) {
+ * i = iter_new(); // fp[-8]
+ * a = 0; // r6
+ * b = 0; // r7
+ * while (iter_next(i)) {
+ * if (a == 1) {
+ * a = 0;
+ * b = 1;
+ * } else if (a == 0) {
+ * a = 1;
+ * if (random() == 42)
+ * continue;
+ * if (b == 1) {
+ * *(r10 + c) = 7; // this is not safe
+ * iter_destroy(i);
+ * iter_destroy(j);
+ * return;
+ * }
+ * }
+ * }
+ * iter_destroy(i);
+ * a = 0;
+ * b = 0;
+ * c = -25;
+ * }
+ * iter_destroy(j);
+ * return;
+ */
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -16;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "r8 = -24;"
+ "j_loop_%=:"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto j_loop_end_%=;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "i_loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto i_loop_end_%=;"
+ "check_one_r6_%=:"
+ "if r6 != 1 goto check_zero_r6_%=;"
+ "r6 = 0;"
+ "r7 = 1;"
+ "goto i_loop_%=;"
+ "check_zero_r6_%=:"
+ "if r6 != 0 goto i_loop_%=;"
+ "r6 = 1;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto check_one_r7_%=;"
+ "goto i_loop_%=;"
+ "check_one_r7_%=:"
+ "if r7 != 1 goto i_loop_%=;"
+ "r0 = r10;"
+ "r0 += r8;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ "i_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "r8 = -25;"
+ "goto j_loop_%=;"
+ "j_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__failure
+__msg("math between fp pointer and register with unbounded")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked int loop_state_deps2(void)
+{
+ /* This is equivalent to C program below.
+ *
+ * The case turns out to be tricky in a sense that:
+ * - states with read+precise mark on c are explored only on a second
+ * iteration of the first inner loop and in a state which is pushed to
+ * states stack first.
+ * - states with c=-25 are explored only on a second iteration of the
+ * second inner loop and in a state which is pushed to states stack
+ * first.
+ *
+ * Depending on the details of iterator convergence logic
+ * verifier might stop states traversal too early and miss
+ * unsafe c=-25 memory access.
+ *
+ * j = iter_new(); // fp[-16]
+ * a = 0; // r6
+ * b = 0; // r7
+ * c = -24; // r8
+ * while (iter_next(j)) {
+ * i = iter_new(); // fp[-8]
+ * a = 0; // r6
+ * b = 0; // r7
+ * while (iter_next(i)) {
+ * if (a == 1) {
+ * a = 0;
+ * b = 1;
+ * } else if (a == 0) {
+ * a = 1;
+ * if (random() == 42)
+ * continue;
+ * if (b == 1) {
+ * *(r10 + c) = 7; // this is not safe
+ * iter_destroy(i);
+ * iter_destroy(j);
+ * return;
+ * }
+ * }
+ * }
+ * iter_destroy(i);
+ * i = iter_new(); // fp[-8]
+ * a = 0; // r6
+ * b = 0; // r7
+ * while (iter_next(i)) {
+ * if (a == 1) {
+ * a = 0;
+ * b = 1;
+ * } else if (a == 0) {
+ * a = 1;
+ * if (random() == 42)
+ * continue;
+ * if (b == 1) {
+ * a = 0;
+ * c = -25;
+ * }
+ * }
+ * }
+ * iter_destroy(i);
+ * }
+ * iter_destroy(j);
+ * return;
+ */
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -16;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "r8 = -24;"
+ "j_loop_%=:"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto j_loop_end_%=;"
+
+ /* first inner loop */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "i_loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto i_loop_end_%=;"
+ "check_one_r6_%=:"
+ "if r6 != 1 goto check_zero_r6_%=;"
+ "r6 = 0;"
+ "r7 = 1;"
+ "goto i_loop_%=;"
+ "check_zero_r6_%=:"
+ "if r6 != 0 goto i_loop_%=;"
+ "r6 = 1;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto check_one_r7_%=;"
+ "goto i_loop_%=;"
+ "check_one_r7_%=:"
+ "if r7 != 1 goto i_loop_%=;"
+ "r0 = r10;"
+ "r0 += r8;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ "i_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+
+ /* second inner loop */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "i2_loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto i2_loop_end_%=;"
+ "check2_one_r6_%=:"
+ "if r6 != 1 goto check2_zero_r6_%=;"
+ "r6 = 0;"
+ "r7 = 1;"
+ "goto i2_loop_%=;"
+ "check2_zero_r6_%=:"
+ "if r6 != 0 goto i2_loop_%=;"
+ "r6 = 1;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto check2_one_r7_%=;"
+ "goto i2_loop_%=;"
+ "check2_one_r7_%=:"
+ "if r7 != 1 goto i2_loop_%=;"
+ "r6 = 0;"
+ "r8 = -25;"
+ "goto i2_loop_%=;"
+ "i2_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+
+ "r6 = 0;"
+ "r7 = 0;"
+ "goto j_loop_%=;"
+ "j_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__failure
+__msg("math between fp pointer and register with unbounded")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked int loop_state_deps3(void)
+{
+ /* This is equivalent to a C program below.
+ *
+ * if (random() != 24) { // assume false branch is placed first
+ * i = iter_new(); // fp[-8]
+ * while (iter_next(i));
+ * iter_destroy(i);
+ * return;
+ * }
+ *
+ * for (i = 10; i > 0; i--); // increase dfs_depth for child states
+ *
+ * i = iter_new(); // fp[-8]
+ * b = -24; // r8
+ * for (;;) { // checkpoint (L)
+ * if (iter_next(i)) // checkpoint (N)
+ * break;
+ * if (random() == 77) { // assume false branch is placed first
+ * *(u64 *)(r10 + b) = 7; // this is not safe when b == -25
+ * iter_destroy(i);
+ * return;
+ * }
+ * if (random() == 42) { // assume false branch is placed first
+ * b = -25;
+ * }
+ * }
+ * iter_destroy(i);
+ *
+ * In case of a buggy verifier the first loop might poison
+ * env->cur_state->loop_entry with a state having 0 branches
+ * and small dfs_depth. This would trigger NOT_EXACT states
+ * comparison for some states within second loop.
+ * Specifically, checkpoint (L) might be problematic if:
+ * - branch with '*(u64 *)(r10 + b) = 7' is not explored yet;
+ * - checkpoint (L) is first reached in state {b=-24};
+ * - traversal is pruned at checkpoint (N) setting checkpoint's (L)
+ * branch count to 0, thus making it eligible for use in pruning;
+ * - checkpoint (L) is next reached in state {b=-25},
+ * this would cause NOT_EXACT comparison with a state {b=-24}
+ * while 'b' is not marked precise yet.
+ */
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 24 goto 2f;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 5;"
+ "call %[bpf_iter_num_new];"
+ "1:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 != 0 goto 1b;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ "2:"
+ /* loop to increase dfs_depth */
+ "r0 = 10;"
+ "3:"
+ "r0 -= 1;"
+ "if r0 != 0 goto 3b;"
+ /* end of loop */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r8 = -24;"
+ "main_loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto main_loop_end_%=;"
+ /* first if */
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 77 goto unsafe_write_%=;"
+ /* second if */
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 42 goto poison_r8_%=;"
+ /* iterate */
+ "goto main_loop_%=;"
+ "main_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+
+ "unsafe_write_%=:"
+ "r0 = r10;"
+ "r0 += r8;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "goto main_loop_end_%=;"
+
+ "poison_r8_%=:"
+ "r8 = -25;"
+ "goto main_loop_%=;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__success
+__naked int triple_continue(void)
+{
+ /* This is equivalent to C program below.
+ * High branching factor of the loop body turned out to be
+ * problematic for one of the iterator convergence tracking
+ * algorithms explored.
+ *
+ * r6 = bpf_get_prandom_u32()
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * if (bpf_get_prandom_u32() != 42)
+ * continue;
+ * if (bpf_get_prandom_u32() != 42)
+ * continue;
+ * if (bpf_get_prandom_u32() != 42)
+ * continue;
+ * r0 += 0;
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return 0
+ */
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto loop_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto loop_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto loop_%=;"
+ "r0 += 0;"
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__success
+__naked int widen_spill(void)
+{
+ /* This is equivalent to C program below.
+ * The counter is stored in fp[-16]; if this counter is not widened,
+ * verifier states representing loop iterations would never converge.
+ *
+ * fp[-16] = 0
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * r0 = fp[-16];
+ * r0 += 1;
+ * fp[-16] = r0;
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return 0
+ */
+ asm volatile (
+ "r0 = 0;"
+ "*(u64 *)(r10 - 16) = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+ "r0 = *(u64 *)(r10 - 16);"
+ "r0 += 1;"
+ "*(u64 *)(r10 - 16) = r0;"
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_all
+ );
+}
+
+SEC("raw_tp")
+__success
+__naked int checkpoint_states_deletion(void)
+{
+ /* This is equivalent to C program below.
+ *
+ * int *a, *b, *c, *d, *e, *f;
+ * int i, sum = 0;
+ * bpf_for(i, 0, 10) {
+ * a = bpf_map_lookup_elem(&amap, &i);
+ * b = bpf_map_lookup_elem(&amap, &i);
+ * c = bpf_map_lookup_elem(&amap, &i);
+ * d = bpf_map_lookup_elem(&amap, &i);
+ * e = bpf_map_lookup_elem(&amap, &i);
+ * f = bpf_map_lookup_elem(&amap, &i);
+ * if (a) sum += 1;
+ * if (b) sum += 1;
+ * if (c) sum += 1;
+ * if (d) sum += 1;
+ * if (e) sum += 1;
+ * if (f) sum += 1;
+ * }
+ * return 0;
+ *
+ * The body of the loop spawns multiple simulation paths
+ * with different combinations of NULL/non-NULL information for a/b/c/d/e/f.
+ * Each combination is unique from states_equal() point of view.
+ * Explored states checkpoint is created after each iterator next call.
+ * Iterator convergence logic expects that eventually current state
+ * would get equal to one of the explored states and thus loop
+ * exploration would be finished (at least for a specific path).
+ * Verifier evicts explored states with high miss to hit ratio
+ * to avoid comparing current state with too many explored
+ * states per instruction.
+ * This test is designed to "stress test" eviction policy defined using formula:
+ *
+ * sl->miss_cnt > sl->hit_cnt * N + N // if true sl->state is evicted
+ *
+ * Currently N is set to 64, which allows for 6 variables in this test.
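+ * For example, with N = 64 a checkpoint state that was hit once is kept
+ * until it accumulates more than 1 * 64 + 64 = 128 misses.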
+ */
+ asm volatile (
+ "r6 = 0;" /* a */
+ "r7 = 0;" /* b */
+ "r8 = 0;" /* c */
+ "*(u64 *)(r10 - 24) = r6;" /* d */
+ "*(u64 *)(r10 - 32) = r6;" /* e */
+ "*(u64 *)(r10 - 40) = r6;" /* f */
+ "r9 = 0;" /* sum */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+
+ "*(u64 *)(r10 - 16) = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "r6 = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "r7 = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "r8 = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "*(u64 *)(r10 - 24) = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "*(u64 *)(r10 - 32) = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "*(u64 *)(r10 - 40) = r0;"
+
+ "if r6 == 0 goto +1;"
+ "r9 += 1;"
+ "if r7 == 0 goto +1;"
+ "r9 += 1;"
+ "if r8 == 0 goto +1;"
+ "r9 += 1;"
+ "r0 = *(u64 *)(r10 - 24);"
+ "if r0 == 0 goto +1;"
+ "r9 += 1;"
+ "r0 = *(u64 *)(r10 - 32);"
+ "if r0 == 0 goto +1;"
+ "r9 += 1;"
+ "r0 = *(u64 *)(r10 - 40);"
+ "if r0 == 0 goto +1;"
+ "r9 += 1;"
+
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm_addr(amap)
+ : __clobber_all
+ );
+}
+
+struct {
+ int data[32];
+ int n;
+} loop_data;
+
+SEC("raw_tp")
+__success
+int iter_arr_with_actual_elem_count(const void *ctx)
+{
+ int i, n = loop_data.n, sum = 0;
+
+ if (n > ARRAY_SIZE(loop_data.data))
+ return 0;
+
+ bpf_for(i, 0, n) {
+ /* no re-checking of i against ARRAY_SIZE(loop_data.data); the
+ * n <= ARRAY_SIZE() check above already bounds i for the verifier
+ */
+ sum += loop_data.data[i];
+ }
+
+ return sum;
+}
+
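+/* test knobs and result flag; presumably set and read by the user-space part of this selftest */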
+__u32 upper, select_n, result;
+__u64 global;
+
+static __noinline bool nest_2(char *str)
+{
+ /* some insns (including branch insns) to ensure stacksafe() is triggered
+ * in nest_2(). This way, stacksafe() can compare frame associated with nest_1().
+ */
+ if (str[0] == 't')
+ return true;
+ if (str[1] == 'e')
+ return true;
+ if (str[2] == 's')
+ return true;
+ if (str[3] == 't')
+ return true;
+ return false;
+}
+
+static __noinline bool nest_1(int n)
+{
+ /* case 0: allocate stack, case 1: don't allocate stack */
+ switch (n) {
+ case 0: {
+ char comm[16];
+
+ if (bpf_get_current_comm(comm, 16))
+ return false;
+ return nest_2(comm);
+ }
+ case 1:
+ return nest_2((char *)&global);
+ default:
+ return false;
+ }
+}
+
+SEC("raw_tp")
+__success
+int iter_subprog_check_stacksafe(const void *ctx)
+{
+ long i;
+
+ bpf_for(i, 0, upper) {
+ if (!nest_1(select_n)) {
+ result = 1;
+ return 0;
+ }
+ }
+
+ result = 2;
+ return 0;
+}
+
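+/* an iterator in global memory, not on the BPF stack; the *_bad_arg tests below expect new/next/destroy to reject it */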
+struct bpf_iter_num global_it;
+
+SEC("raw_tp")
+__failure __msg("arg#0 expected pointer to an iterator on stack")
+int iter_new_bad_arg(const void *ctx)
+{
+ bpf_iter_num_new(&global_it, 0, 1);
+ return 0;
+}
+
+SEC("raw_tp")
+__failure __msg("arg#0 expected pointer to an iterator on stack")
+int iter_next_bad_arg(const void *ctx)
+{
+ bpf_iter_num_next(&global_it);
+ return 0;
+}
+
+SEC("raw_tp")
+__failure __msg("arg#0 expected pointer to an iterator on stack")
+int iter_destroy_bad_arg(const void *ctx)
+{
+ bpf_iter_num_destroy(&global_it);
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int clean_live_states(const void *ctx)
+{
+ char buf[1];
+ int i, j, k, l, m, n, o;
+
+ bpf_for(i, 0, 10)
+ bpf_for(j, 0, 10)
+ bpf_for(k, 0, 10)
+ bpf_for(l, 0, 10)
+ bpf_for(m, 0, 10)
+ bpf_for(n, 0, 10)
+ bpf_for(o, 0, 10) {
+ if (unlikely(bpf_get_prandom_u32()))
+ buf[0] = 42;
+ bpf_printk("%s", buf);
+ }
+ return 0;
+}
+
+SEC("?raw_tp")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("misaligned stack access off 0+-31+0 size 8")
+__naked int absent_mark_in_the_middle_state(void)
+{
+ /* This is equivalent to C program below.
+ *
+ * r8 = bpf_get_prandom_u32();
+ * r6 = -32;
+ * bpf_iter_num_new(&fp[-8], 0, 10);
+ * if (unlikely(bpf_get_prandom_u32()))
+ * r6 = -31;
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * if (unlikely(bpf_get_prandom_u32()))
+ * *(fp + r6) = 7;
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return 0
+ */
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r8 = r0;"
+ "r7 = 0;"
+ "r6 = -32;"
+ "r0 = 0;"
+ "*(u64 *)(r10 - 16) = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto change_r6_%=;"
+ "loop_%=:"
+ "call noop;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto use_r6_%=;"
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ "use_r6_%=:"
+ "r0 = r10;"
+ "r0 += r6;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "goto loop_%=;"
+ "change_r6_%=:"
+ "r6 = -31;"
+ "goto loop_%=;"
+ :
+ : __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
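+/* trivial subprogram; called from the loop in absent_mark_in_the_middle_state()
+ * above so that the loop body includes a subprog call
+ */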
+__used __naked
+static int noop(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ );
+}
+
+SEC("?raw_tp")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("misaligned stack access off 0+-31+0 size 8")
+__naked int absent_mark_in_the_middle_state2(void)
+{
+ /* This is equivalent to C program below.
+ *
+ * r8 = bpf_get_prandom_u32();
+ * r6 = -32;
+ * bpf_iter_num_new(&fp[-8], 0, 10);
+ * if (unlikely(bpf_get_prandom_u32())) {
+ * r6 = -31;
+ * jump_into_loop:
+ * goto +0;
+ * goto loop;
+ * }
+ * if (unlikely(bpf_get_prandom_u32()))
+ * goto jump_into_loop;
+ * loop:
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * if (unlikely(bpf_get_prandom_u32()))
+ * *(fp + r6) = 7;
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return 0
+ */
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r8 = r0;"
+ "r7 = 0;"
+ "r6 = -32;"
+ "r0 = 0;"
+ "*(u64 *)(r10 - 16) = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto change_r6_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto jump_into_loop_%=;"
+ "loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto use_r6_%=;"
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ "use_r6_%=:"
+ "r0 = r10;"
+ "r0 += r6;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "goto loop_%=;"
+ "change_r6_%=:"
+ "r6 = -31;"
+ "jump_into_loop_%=: "
+ "goto +0;"
+ "goto loop_%=;"
+ :
+ : __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("misaligned stack access off 0+-31+0 size 8")
+__naked int absent_mark_in_the_middle_state3(void)
+{
+ /*
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * loop1(-32, &fp[-8])
+ * bpf_iter_num_destroy(&fp[-8])
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * loop1_wrapper(&fp[-8])
+ * bpf_iter_num_destroy(&fp[-8])
+ */
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ /* call #1 */
+ "r1 = -32;"
+ "r2 = r10;"
+ "r2 += -8;"
+ "call loop1;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ /* call #2 */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call loop1_wrapper;"
+ /* return */
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+__used __naked
+static int loop1(void)
+{
+ /*
+ * int loop1(num, iter) {
+ * r6 = num;
+ * r7 = iter;
+ * while (bpf_iter_num_next(r7)) {
+ * if (unlikely(bpf_get_prandom_u32()))
+ * *(fp + r6) = 7;
+ * }
+ * return 0
+ * }
+ */
+ asm volatile (
+ "r6 = r1;"
+ "r7 = r2;"
+ "call %[bpf_get_prandom_u32];"
+ "r8 = r0;"
+ "loop_%=:"
+ "r1 = r7;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto use_r6_%=;"
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r0 = 0;"
+ "exit;"
+ "use_r6_%=:"
+ "r0 = r10;"
+ "r0 += r6;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "goto loop_%=;"
+ :
+ : __imm(bpf_iter_num_next),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+__used __naked
+static int loop1_wrapper(void)
+{
+ /*
+ * int loop1_wrapper(iter) {
+ * r6 = -32;
+ * r7 = iter;
+ * if (unlikely(bpf_get_prandom_u32()))
+ * r6 = -31;
+ * loop1(r6, r7);
+ * return 0;
+ * }
+ */
+ asm volatile (
+ "r6 = -32;"
+ "r7 = r1;"
+ "call %[bpf_get_prandom_u32];"
+ "r8 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto change_r6_%=;"
+ "loop_%=:"
+ "r1 = r6;"
+ "r2 = r7;"
+ "call loop1;"
+ "r0 = 0;"
+ "exit;"
+ "change_r6_%=:"
+ "r6 = -31;"
+ "goto loop_%=;"
+ :
+ : __imm(bpf_iter_num_next),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/iters_css.c b/tools/testing/selftests/bpf/progs/iters_css.c
new file mode 100644
index 000000000000..ec1f6c2f590b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_css.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t target_pid;
+u64 root_cg_id, leaf_cg_id;
+u64 first_cg_id, last_cg_id;
+
+int pre_order_cnt, post_order_cnt, tree_high;
+
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+SEC("fentry.s/" SYS_PREFIX "sys_getpgid")
+int iter_css_for_each(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct cgroup_subsys_state *root_css, *leaf_css, *pos;
+ struct cgroup *root_cgrp, *leaf_cgrp, *cur_cgrp;
+
+ if (cur_task->pid != target_pid)
+ return 0;
+
+ root_cgrp = bpf_cgroup_from_id(root_cg_id);
+
+ if (!root_cgrp)
+ return 0;
+
+ leaf_cgrp = bpf_cgroup_from_id(leaf_cg_id);
+
+ if (!leaf_cgrp) {
+ bpf_cgroup_release(root_cgrp);
+ return 0;
+ }
+ root_css = &root_cgrp->self;
+ leaf_css = &leaf_cgrp->self;
+ pre_order_cnt = post_order_cnt = tree_high = 0;
+ first_cg_id = last_cg_id = 0;
+
+ bpf_rcu_read_lock();
+ bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) {
+ cur_cgrp = pos->cgroup;
+ post_order_cnt++;
+ last_cg_id = cur_cgrp->kn->id;
+ }
+
+ bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_PRE) {
+ cur_cgrp = pos->cgroup;
+ pre_order_cnt++;
+ if (!first_cg_id)
+ first_cg_id = cur_cgrp->kn->id;
+ }
+
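+ /* tree_high ends up as the depth of the leaf cgroup relative to the root:
+ * ancestors of the leaf minus ancestors of the root
+ */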
+ bpf_for_each(css, pos, leaf_css, BPF_CGROUP_ITER_ANCESTORS_UP)
+ tree_high++;
+
+ bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_ANCESTORS_UP)
+ tree_high--;
+ bpf_rcu_read_unlock();
+ bpf_cgroup_release(root_cgrp);
+ bpf_cgroup_release(leaf_cgrp);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_css_task.c b/tools/testing/selftests/bpf/progs/iters_css_task.c
new file mode 100644
index 000000000000..9ac758649cb8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_css_task.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+
+pid_t target_pid;
+int css_task_cnt;
+u64 cg_id;
+
+SEC("lsm/file_mprotect")
+int BPF_PROG(iter_css_task_for_each, struct vm_area_struct *vma,
+ unsigned long reqprot, unsigned long prot, int ret)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct cgroup_subsys_state *css;
+ struct task_struct *task;
+ struct cgroup *cgrp;
+
+ if (cur_task->pid != target_pid)
+ return ret;
+
+ cgrp = bpf_cgroup_from_id(cg_id);
+
+ if (!cgrp)
+ return -EPERM;
+
+ css = &cgrp->self;
+ css_task_cnt = 0;
+
+ bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS)
+ if (task->pid == target_pid)
+ css_task_cnt++;
+
+ bpf_cgroup_release(cgrp);
+
+ return -EPERM;
+}
+
+static inline u64 cgroup_id(struct cgroup *cgrp)
+{
+ return cgrp->kn->id;
+}
+
+SEC("?iter/cgroup")
+int cgroup_id_printer(struct bpf_iter__cgroup *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct cgroup *cgrp = ctx->cgroup;
+ struct cgroup_subsys_state *css;
+ struct task_struct *task;
+
+ /* epilogue */
+ if (cgrp == NULL) {
+ BPF_SEQ_PRINTF(seq, "epilogue\n");
+ return 0;
+ }
+
+ /* prologue */
+ if (ctx->meta->seq_num == 0)
+ BPF_SEQ_PRINTF(seq, "prologue\n");
+
+ BPF_SEQ_PRINTF(seq, "%8llu\n", cgroup_id(cgrp));
+
+ css = &cgrp->self;
+ css_task_cnt = 0;
+ bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
+ if (task->pid == target_pid)
+ css_task_cnt++;
+ }
+
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int BPF_PROG(iter_css_task_for_each_sleep)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp = bpf_cgroup_from_id(cgrp_id);
+ struct cgroup_subsys_state *css;
+ struct task_struct *task;
+
+ if (cgrp == NULL)
+ return 0;
+ css = &cgrp->self;
+
+ bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
+
+ }
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_looping.c b/tools/testing/selftests/bpf/progs/iters_looping.c
new file mode 100644
index 000000000000..05fa5ce7fc59
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_looping.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <errno.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
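+/* pass the numeric iterator kfuncs to inline asm as immediate operands */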
+#define ITER_HELPERS \
+ __imm(bpf_iter_num_new), \
+ __imm(bpf_iter_num_next), \
+ __imm(bpf_iter_num_destroy)
+
+SEC("?raw_tp")
+__success
+int force_clang_to_emit_btf_for_externs(void *ctx)
+{
+ /* we need this as a workaround to force the compiler to emit BTF
+ * information for the bpf_iter_num_{new,next,destroy}() kfuncs, as it
+ * apparently doesn't emit it for symbols referenced only from assembly
+ * (or from a cleanup attribute, for that matter)
+ */
+ bpf_repeat(0);
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__success
+int consume_first_item_only(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* consume first item */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+
+ "if r0 == 0 goto +1;"
+ "r0 = *(u32 *)(r0 + 0);"
+
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("R0 invalid mem access 'scalar'")
+int missing_null_check_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* consume first element */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+
+ /* FAIL: deref with no NULL check */
+ "r1 = *(u32 *)(r0 + 0);"
+
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure
+__msg("invalid access to memory, mem_size=4 off=0 size=8")
+__msg("R0 min value is outside of the allowed memory range")
+int wrong_sized_read_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* consume first element */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+
+ "if r0 == 0 goto +1;"
+ /* FAIL: deref more than available 4 bytes */
+ "r0 = *(u64 *)(r0 + 0);"
+
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+int simplest_loop(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ "r6 = 0;" /* init sum */
+
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+
+ "1:"
+ /* consume next item */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+
+ "if r0 == 0 goto 2f;"
+ "r0 = *(u32 *)(r0 + 0);"
+ "r6 += r0;" /* accumulate sum */
+ "goto 1b;"
+
+ "2:"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common, "r6"
+ );
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_num.c b/tools/testing/selftests/bpf/progs/iters_num.c
new file mode 100644
index 000000000000..7a77a8daee0d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_num.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <limits.h>
+#include <linux/errno.h>
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
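+/* each expected value below is encoded as <unique offset> + <expected sum>;
+ * the programs store the same offset plus the computed sum (presumably so
+ * a test that never ran, leaving 0, can't be mistaken for a passing one)
+ */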
+const volatile __s64 exp_empty_zero = 0 + 1;
+__s64 res_empty_zero;
+
+SEC("raw_tp/sys_enter")
+int num_empty_zero(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, 0, 0) sum += i;
+ res_empty_zero = 1 + sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_empty_int_min = 0 + 2;
+__s64 res_empty_int_min;
+
+SEC("raw_tp/sys_enter")
+int num_empty_int_min(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, INT_MIN, INT_MIN) sum += i;
+ res_empty_int_min = 2 + sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_empty_int_max = 0 + 3;
+__s64 res_empty_int_max;
+
+SEC("raw_tp/sys_enter")
+int num_empty_int_max(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, INT_MAX, INT_MAX) sum += i;
+ res_empty_int_max = 3 + sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_empty_minus_one = 0 + 4;
+__s64 res_empty_minus_one;
+
+SEC("raw_tp/sys_enter")
+int num_empty_minus_one(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, -1, -1) sum += i;
+ res_empty_minus_one = 4 + sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_simple_sum = 9 * 10 / 2;
+__s64 res_simple_sum;
+
+SEC("raw_tp/sys_enter")
+int num_simple_sum(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, 0, 10) sum += i;
+ res_simple_sum = sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_neg_sum = -11 * 10 / 2;
+__s64 res_neg_sum;
+
+SEC("raw_tp/sys_enter")
+int num_neg_sum(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, -10, 0) sum += i;
+ res_neg_sum = sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_very_neg_sum = INT_MIN + (__s64)(INT_MIN + 1);
+__s64 res_very_neg_sum;
+
+SEC("raw_tp/sys_enter")
+int num_very_neg_sum(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, INT_MIN, INT_MIN + 2) sum += i;
+ res_very_neg_sum = sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_very_big_sum = (__s64)(INT_MAX - 1) + (__s64)(INT_MAX - 2);
+__s64 res_very_big_sum;
+
+SEC("raw_tp/sys_enter")
+int num_very_big_sum(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, INT_MAX - 2, INT_MAX) sum += i;
+ res_very_big_sum = sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_neg_pos_sum = -3;
+__s64 res_neg_pos_sum;
+
+SEC("raw_tp/sys_enter")
+int num_neg_pos_sum(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, -3, 3) sum += i;
+ res_neg_pos_sum = sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_invalid_range = -EINVAL;
+__s64 res_invalid_range;
+
+SEC("raw_tp/sys_enter")
+int num_invalid_range(const void *ctx)
+{
+ struct bpf_iter_num it;
+
+ res_invalid_range = bpf_iter_num_new(&it, 1, 0);
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+const volatile __s64 exp_max_range = 0 + 10;
+__s64 res_max_range;
+
+SEC("raw_tp/sys_enter")
+int num_max_range(const void *ctx)
+{
+ struct bpf_iter_num it;
+
+ res_max_range = 10 + bpf_iter_num_new(&it, 0, BPF_MAX_LOOPS);
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+const volatile __s64 exp_e2big_range = -E2BIG;
+__s64 res_e2big_range;
+
+SEC("raw_tp/sys_enter")
+int num_e2big_range(const void *ctx)
+{
+ struct bpf_iter_num it;
+
+ res_e2big_range = bpf_iter_num_new(&it, -1, BPF_MAX_LOOPS);
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+const volatile __s64 exp_succ_elem_cnt = 10;
+__s64 res_succ_elem_cnt;
+
+SEC("raw_tp/sys_enter")
+int num_succ_elem_cnt(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int cnt = 0, *v;
+
+ bpf_iter_num_new(&it, 0, 10);
+ while ((v = bpf_iter_num_next(&it))) {
+ cnt++;
+ }
+ bpf_iter_num_destroy(&it);
+
+ res_succ_elem_cnt = cnt;
+
+ return 0;
+}
+
+const volatile __s64 exp_overfetched_elem_cnt = 5;
+__s64 res_overfetched_elem_cnt;
+
+SEC("raw_tp/sys_enter")
+int num_overfetched_elem_cnt(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int cnt = 0, *v, i;
+
+ bpf_iter_num_new(&it, 0, 5);
+ for (i = 0; i < 10; i++) {
+ v = bpf_iter_num_next(&it);
+ if (v)
+ cnt++;
+ }
+ bpf_iter_num_destroy(&it);
+
+ res_overfetched_elem_cnt = cnt;
+
+ return 0;
+}
+
+const volatile __s64 exp_fail_elem_cnt = 20 + 0;
+__s64 res_fail_elem_cnt;
+
+SEC("raw_tp/sys_enter")
+int num_fail_elem_cnt(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int cnt = 0, *v, i;
+
+ bpf_iter_num_new(&it, 100, 10);
+ for (i = 0; i < 10; i++) {
+ v = bpf_iter_num_next(&it);
+ if (v)
+ cnt++;
+ }
+ bpf_iter_num_destroy(&it);
+
+ res_fail_elem_cnt = 20 + cnt;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/iters_state_safety.c b/tools/testing/selftests/bpf/progs/iters_state_safety.c
new file mode 100644
index 000000000000..f41257eadbb2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_state_safety.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include <errno.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define ITER_HELPERS \
+ __imm(bpf_iter_num_new), \
+ __imm(bpf_iter_num_next), \
+ __imm(bpf_iter_num_destroy)
+
+SEC("?raw_tp")
+__success
+int force_clang_to_emit_btf_for_externs(void *ctx)
+{
+ /* we need this as a workaround to force the compiler to emit BTF
+ * information for the bpf_iter_num_{new,next,destroy}() kfuncs, as it
+ * apparently doesn't emit it for symbols referenced only from assembly
+ * (or from a cleanup attribute, for that matter)
+ */
+ bpf_repeat(0);
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("fp-8_w=iter_num(ref_id=1,state=active,depth=0)")
+int create_and_destroy(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("Unreleased reference id=1")
+int create_and_forget_to_destroy_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("expected an initialized iter_num as arg #0")
+int destroy_without_creating_fail(void *ctx)
+{
+ /* iter is deliberately left uninitialized; bpf_iter_num_new() is never called */
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("expected an initialized iter_num as arg #0")
+int compromise_iter_w_direct_write_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* directly write over first half of iter state */
+ "*(u64 *)(%[iter] + 0) = r0;"
+
+ /* (attempt to) destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("Unreleased reference id=1")
+int compromise_iter_w_direct_write_and_skip_destroy_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* directly write over first half of iter state */
+ "*(u64 *)(%[iter] + 0) = r0;"
+
+ /* don't destroy iter, leaking ref, which should fail */
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("expected an initialized iter_num as arg #0")
+int compromise_iter_w_helper_write_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* overwrite 8th byte with bpf_probe_read_kernel() */
+ "r1 = %[iter];"
+ "r1 += 7;"
+ "r2 = 1;"
+ "r3 = 0;" /* NULL */
+ "call %[bpf_probe_read_kernel];"
+
+ /* (attempt to) destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS, __imm(bpf_probe_read_kernel)
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+static __noinline void subprog_with_iter(void)
+{
+ struct bpf_iter_num iter;
+
+ bpf_iter_num_new(&iter, 0, 1);
+
+ return;
+}
+
+SEC("?raw_tp")
+__failure
+/* ensure there was a call to subprog, which might happen without __noinline */
+__msg("returning from callee:")
+__msg("Unreleased reference id=1")
+int leak_iter_from_subprog_fail(void *ctx)
+{
+ subprog_with_iter();
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("fp-8_w=iter_num(ref_id=1,state=active,depth=0)")
+int valid_stack_reuse(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+
+ /* now reuse same stack slots */
+
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("expected uninitialized iter_num as arg #0")
+int double_create_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ /* (attempt to) create iterator again */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("expected an initialized iter_num as arg #0")
+int double_destroy_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ /* (attempt to) destroy iterator again */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("expected an initialized iter_num as arg #0")
+int next_without_new_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* don't create the iterator, just try to iterate */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("expected an initialized iter_num as arg #0")
+int next_after_destroy_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ /* iterator was already destroyed, but try to iterate anyway */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid read from stack")
+int __naked read_from_iter_slot_fail(void)
+{
+ asm volatile (
+ /* r6 points to struct bpf_iter_num on the stack */
+ "r6 = r10;"
+ "r6 += -24;"
+
+ /* create iterator */
+ "r1 = r6;"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* attempt to leak bpf_iter_num state */
+ "r7 = *(u64 *)(r6 + 0);"
+ "r8 = *(u64 *)(r6 + 8);"
+
+ /* destroy iterator */
+ "r1 = r6;"
+ "call %[bpf_iter_num_destroy];"
+
+ /* leak bpf_iter_num state */
+ "r0 = r7;"
+ "if r7 > r8 goto +1;"
+ "r0 = r8;"
+ "exit;"
+ :
+ : ITER_HELPERS
+ : __clobber_common, "r6", "r7", "r8"
+ );
+}
+
+int zero;
+
+SEC("?raw_tp")
+__failure
+__flag(BPF_F_TEST_STATE_FREQ)
+__msg("Unreleased reference")
+int stacksafe_should_not_conflate_stack_spill_and_iter(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* Create a fork in logic, with general setup as follows:
+ * - fallthrough (first) path is valid;
+ * - branch (second) path is invalid.
+ * Then depending on what we do in fallthrough vs branch path,
+ * we try to detect bugs in func_states_equal(), regsafe(),
+ * refsafe(), stack_safe(), and similar by tricking verifier
+ * into believing that branch state is a valid subset of
+ * a fallthrough state. Verifier should reject overall
+ * validation, unless there is a bug somewhere in verifier
+ * logic.
+ */
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+
+ "if r6 > r7 goto bad;" /* fork */
+
+ /* spill r6 into stack slot of bpf_iter_num var */
+ "*(u64 *)(%[iter] + 0) = r6;"
+
+ "goto skip_bad;"
+
+ "bad:"
+ /* create iterator in the same stack slot */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* but then forget about it and overwrite it back to r6 spill */
+ "*(u64 *)(%[iter] + 0) = r6;"
+
+ "skip_bad:"
+ "goto +0;" /* force checkpoint */
+
+ /* corrupt stack slots, if they really hold an iterator */
+ "*(u64 *)(%[iter] + 0) = r6;"
+ :
+ : __imm_ptr(iter),
+ __imm_addr(zero),
+ __imm(bpf_get_prandom_u32),
+ __imm(bpf_dynptr_from_mem),
+ ITER_HELPERS
+ : __clobber_common, "r6", "r7"
+ );
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_task.c b/tools/testing/selftests/bpf/progs/iters_task.c
new file mode 100644
index 000000000000..e4d53e40ff20
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_task.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t target_pid;
+int procs_cnt, threads_cnt, proc_threads_cnt, invalid_cnt;
+
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+SEC("fentry.s/" SYS_PREFIX "sys_getpgid")
+int iter_task_for_each_sleep(void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct task_struct *pos;
+
+ if (cur_task->pid != target_pid)
+ return 0;
+ procs_cnt = threads_cnt = proc_threads_cnt = 0;
+
+ bpf_rcu_read_lock();
+ bpf_for_each(task, pos, NULL, ~0U) {
+ /* Below instructions shouldn't be executed for invalid flags */
+ invalid_cnt++;
+ }
+
+ bpf_for_each(task, pos, NULL, BPF_TASK_ITER_PROC_THREADS) {
+ /* Below instructions shouldn't be executed for invalid task__nullable */
+ invalid_cnt++;
+ }
+
+ bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS)
+ if (pos->pid == target_pid)
+ procs_cnt++;
+
+ bpf_for_each(task, pos, cur_task, BPF_TASK_ITER_PROC_THREADS)
+ proc_threads_cnt++;
+
+ bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_THREADS)
+ if (pos->tgid == target_pid)
+ threads_cnt++;
+ bpf_rcu_read_unlock();
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_task_failure.c b/tools/testing/selftests/bpf/progs/iters_task_failure.c
new file mode 100644
index 000000000000..6b1588d70652
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_task_failure.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("expected an RCU CS when using bpf_iter_task_next")
+int BPF_PROG(iter_tasks_without_lock)
+{
+ struct task_struct *pos;
+
+ bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) {
+
+ }
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("expected an RCU CS when using bpf_iter_css_next")
+int BPF_PROG(iter_css_without_lock)
+{
+ u64 cg_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
+ struct cgroup_subsys_state *root_css, *pos;
+
+ if (!cgrp)
+ return 0;
+ root_css = &cgrp->self;
+
+ bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) {
+
+ }
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("expected an RCU CS when using bpf_iter_task_next")
+int BPF_PROG(iter_tasks_lock_and_unlock)
+{
+ struct task_struct *pos;
+
+ bpf_rcu_read_lock();
+ bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) {
+ bpf_rcu_read_unlock();
+
+ bpf_rcu_read_lock();
+ }
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("expected an RCU CS when using bpf_iter_css_next")
+int BPF_PROG(iter_css_lock_and_unlock)
+{
+ u64 cg_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
+ struct cgroup_subsys_state *root_css, *pos;
+
+ if (!cgrp)
+ return 0;
+ root_css = &cgrp->self;
+
+ bpf_rcu_read_lock();
+ bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) {
+ bpf_rcu_read_unlock();
+
+ bpf_rcu_read_lock();
+ }
+ bpf_rcu_read_unlock();
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+__failure __msg("css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs")
+int BPF_PROG(iter_css_task_for_each)
+{
+ u64 cg_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
+ struct cgroup_subsys_state *css;
+ struct task_struct *task;
+
+ if (cgrp == NULL)
+ return 0;
+ css = &cgrp->self;
+
+ bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
+
+ }
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_task_vma.c b/tools/testing/selftests/bpf/progs/iters_task_vma.c
new file mode 100644
index 000000000000..dc0c3691dcc2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_task_vma.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_experimental.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+pid_t target_pid = 0;
+unsigned int vmas_seen = 0;
+
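+/* start/end of the first 1000 VMAs of the target task; presumably checked by the user-space part of the test */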
+struct {
+ __u64 vm_start;
+ __u64 vm_end;
+} vm_ranges[1000];
+
+SEC("raw_tp/sys_enter")
+int iter_task_vma_for_each(const void *ctx)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct vm_area_struct *vma;
+ unsigned int seen = 0;
+
+ if (task->pid != target_pid)
+ return 0;
+
+ if (vmas_seen)
+ return 0;
+
+ bpf_for_each(task_vma, vma, task, 0) {
+ if (bpf_cmp_unlikely(seen, >=, 1000))
+ break;
+
+ vm_ranges[seen].vm_start = vma->vm_start;
+ vm_ranges[seen].vm_end = vma->vm_end;
+ seen++;
+ }
+
+ vmas_seen = seen;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/iters_testmod.c b/tools/testing/selftests/bpf/progs/iters_testmod.c
new file mode 100644
index 000000000000..9e4b45201e69
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_testmod.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include "bpf_experimental.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("raw_tp/sys_enter")
+__success
+int iter_next_trusted(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct bpf_iter_task_vma vma_it;
+ struct vm_area_struct *vma_ptr;
+
+ bpf_iter_task_vma_new(&vma_it, cur_task, 0);
+
+ vma_ptr = bpf_iter_task_vma_next(&vma_it);
+ if (vma_ptr == NULL)
+ goto out;
+
+ bpf_kfunc_trusted_vma_test(vma_ptr);
+out:
+ bpf_iter_task_vma_destroy(&vma_it);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int iter_next_trusted_or_null(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct bpf_iter_task_vma vma_it;
+ struct vm_area_struct *vma_ptr;
+
+ bpf_iter_task_vma_new(&vma_it, cur_task, 0);
+
+ vma_ptr = bpf_iter_task_vma_next(&vma_it);
+
+ bpf_kfunc_trusted_vma_test(vma_ptr);
+
+ bpf_iter_task_vma_destroy(&vma_it);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+__success
+int iter_next_rcu(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct bpf_iter_task task_it;
+ struct task_struct *task_ptr;
+
+ bpf_iter_task_new(&task_it, cur_task, 0);
+
+ task_ptr = bpf_iter_task_next(&task_it);
+ if (task_ptr == NULL)
+ goto out;
+
+ bpf_kfunc_rcu_task_test(task_ptr);
+out:
+ bpf_iter_task_destroy(&task_it);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int iter_next_rcu_or_null(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct bpf_iter_task task_it;
+ struct task_struct *task_ptr;
+
+ bpf_iter_task_new(&task_it, cur_task, 0);
+
+ task_ptr = bpf_iter_task_next(&task_it);
+
+ bpf_kfunc_rcu_task_test(task_ptr);
+
+ bpf_iter_task_destroy(&task_it);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+__failure __msg("R1 must be referenced or trusted")
+int iter_next_rcu_not_trusted(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct bpf_iter_task task_it;
+ struct task_struct *task_ptr;
+
+ bpf_iter_task_new(&task_it, cur_task, 0);
+
+ task_ptr = bpf_iter_task_next(&task_it);
+ if (task_ptr == NULL)
+ goto out;
+
+ bpf_kfunc_trusted_task_test(task_ptr);
+out:
+ bpf_iter_task_destroy(&task_it);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+__failure __msg("R1 cannot write into rdonly_mem")
+/* Message should not be 'R1 cannot write into rdonly_trusted_mem' */
+int iter_next_ptr_mem_not_trusted(const void *ctx)
+{
+ struct bpf_iter_num num_it;
+ int *num_ptr;
+
+ bpf_iter_num_new(&num_it, 0, 10);
+
+ num_ptr = bpf_iter_num_next(&num_it);
+ if (num_ptr == NULL)
+ goto out;
+
+ bpf_kfunc_trusted_num_test(num_ptr);
+out:
+ bpf_iter_num_destroy(&num_it);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
new file mode 100644
index 000000000000..6543d5b6e0a9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
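+/* Opaque iterator state: the anonymous bitfields only reserve the space used
+ * by the kernel-side implementation in the test module; the verifier tracks
+ * the state through the new/next/destroy kfuncs declared below.
+ */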
+struct bpf_iter_testmod_seq {
+ u64 :64;
+ u64 :64;
+};
+
+extern int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt) __ksym;
+extern s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it) __ksym;
+extern s64 bpf_iter_testmod_seq_value(int blah, struct bpf_iter_testmod_seq *it) __ksym;
+extern void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it) __ksym;
+
+const volatile __s64 exp_empty = 0 + 1;
+__s64 res_empty;
+
+SEC("raw_tp/sys_enter")
+__success __log_level(2)
+__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)")
+__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)")
+__msg("call bpf_iter_testmod_seq_destroy")
+int testmod_seq_empty(const void *ctx)
+{
+ __s64 sum = 0, *i;
+
+ bpf_for_each(testmod_seq, i, 1000, 0) sum += *i;
+ res_empty = 1 + sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_full = 1000000;
+__s64 res_full;
+
+SEC("raw_tp/sys_enter")
+__success __log_level(2)
+__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)")
+__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)")
+__msg("call bpf_iter_testmod_seq_destroy")
+int testmod_seq_full(const void *ctx)
+{
+ __s64 sum = 0, *i;
+
+ bpf_for_each(testmod_seq, i, 1000, 1000) sum += *i;
+ res_full = sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_truncated = 10 * 1000000;
+__s64 res_truncated;
+
+static volatile int zero = 0;
+
+SEC("raw_tp/sys_enter")
+__success __log_level(2)
+__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)")
+__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)")
+__msg("call bpf_iter_testmod_seq_destroy")
+int testmod_seq_truncated(const void *ctx)
+{
+ __s64 sum = 0, *i;
+ int cnt = zero;
+
+ bpf_for_each(testmod_seq, i, 10, 2000000) {
+ sum += *i;
+ cnt++;
+ if (cnt >= 1000000)
+ break;
+ }
+ res_truncated = sum;
+
+ return 0;
+}
+
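+/* The _value() getter requires an initialized iterator: calling it before
+ * _new() or after _destroy() must be rejected by the verifier, as the two
+ * programs below check.
+ */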
+SEC("?raw_tp")
+__failure
+__msg("expected an initialized iter_testmod_seq as arg #1")
+int testmod_seq_getter_before_bad(const void *ctx)
+{
+ struct bpf_iter_testmod_seq it;
+
+ return bpf_iter_testmod_seq_value(0, &it);
+}
+
+SEC("?raw_tp")
+__failure
+__msg("expected an initialized iter_testmod_seq as arg #1")
+int testmod_seq_getter_after_bad(const void *ctx)
+{
+ struct bpf_iter_testmod_seq it;
+ s64 sum = 0, *v;
+
+ bpf_iter_testmod_seq_new(&it, 100, 100);
+
+ while ((v = bpf_iter_testmod_seq_next(&it))) {
+ sum += *v;
+ }
+
+ bpf_iter_testmod_seq_destroy(&it);
+
+ return sum + bpf_iter_testmod_seq_value(0, &it);
+}
+
+SEC("?socket")
+__success __retval(1000000)
+int testmod_seq_getter_good(const void *ctx)
+{
+ struct bpf_iter_testmod_seq it;
+ s64 sum = 0, *v;
+
+ bpf_iter_testmod_seq_new(&it, 100, 100);
+
+ while ((v = bpf_iter_testmod_seq_next(&it))) {
+ sum += *v;
+ }
+
+ sum *= bpf_iter_testmod_seq_value(0, &it);
+
+ bpf_iter_testmod_seq_destroy(&it);
+
+ return sum;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
new file mode 100644
index 000000000000..4d619bea9c75
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#ifndef __clang__
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#endif
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, u64);
+ __type(value, u64);
+} m_hash SEC(".maps");
+
+SEC("?raw_tp")
+__failure __msg("R8 invalid mem access 'map_value_or_null")
+int jeq_infer_not_null_ptr_to_btfid(void *ctx)
+{
+ struct bpf_map *map = (struct bpf_map *)&m_hash;
+ struct bpf_map *inner_map = map->inner_map_meta;
+ u64 key = 0, ret = 0, *val;
+
+ val = bpf_map_lookup_elem(map, &key);
+	/* The verifier must not mark the pointer as non-NULL when the other
+	 * side of the comparison is a PTR_TO_BTF_ID (R9); the program is
+	 * expected to be rejected for the invalid access to the map value
+	 * (R8).
+	 *
+	 * Inline asm is needed to access R8 directly, since the compiler
+	 * may pick another register once it figures out val == inner_map.
+	 */
+ asm volatile("r8 = %[val];\n"
+ "r9 = %[inner_map];\n"
+ "if r8 != r9 goto +1;\n"
+ "%[ret] = *(u64 *)(r8 +0);\n"
+ : [ret] "+r"(ret)
+ : [inner_map] "r"(inner_map), [val] "r"(val)
+ : "r8", "r9");
+
+ return ret;
+}
diff --git a/tools/testing/selftests/bpf/progs/jit_probe_mem.c b/tools/testing/selftests/bpf/progs/jit_probe_mem.c
new file mode 100644
index 000000000000..82190d79de37
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/jit_probe_mem.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+static struct prog_test_ref_kfunc __kptr *v;
+long total_sum = -1;
+
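+/* Read fields through an untrusted kptr (obtained via a direct map value
+ * access) using inline asm, so the verifier turns the loads into
+ * probe-memory accesses the JIT has to handle; total_sum is read back by
+ * the userspace test.
+ */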
+SEC("tc")
+int test_jit_probe_mem(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ unsigned long zero = 0, sum;
+
+ p = bpf_kfunc_call_test_acquire(&zero);
+ if (!p)
+ return 1;
+
+ p = bpf_kptr_xchg(&v, p);
+ if (p)
+ goto release_out;
+
+ /* Direct map value access of kptr, should be PTR_UNTRUSTED */
+ p = v;
+ if (!p)
+ return 1;
+
+ asm volatile (
+ "r9 = %[p];"
+ "%[sum] = 0;"
+
+ /* r8 = p->a */
+ "r8 = *(u32 *)(r9 + 0);"
+ "%[sum] += r8;"
+
+ /* r8 = p->b */
+ "r8 = *(u32 *)(r9 + 4);"
+ "%[sum] += r8;"
+
+ "r9 += 8;"
+ /* r9 = p->a */
+ "r9 = *(u32 *)(r9 - 8);"
+ "%[sum] += r9;"
+
+ : [sum] "=r"(sum)
+ : [p] "r"(p)
+ : "r8", "r9"
+ );
+
+ total_sum = sum;
+ return 0;
+release_out:
+ bpf_kfunc_call_test_release(p);
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c
index a46a264ce24e..7236da72ce80 100644
--- a/tools/testing/selftests/bpf/progs/kfree_skb.c
+++ b/tools/testing/selftests/bpf/progs/kfree_skb.c
@@ -9,8 +9,8 @@
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} perf_buf_map SEC(".maps");
#define _(P) (__builtin_preserve_access_index(P))
@@ -109,10 +109,10 @@ int BPF_PROG(trace_kfree_skb, struct sk_buff *skb, void *location)
return 0;
}
-static volatile struct {
+struct {
bool fentry_test_ok;
bool fexit_test_ok;
-} result;
+} result = {};
SEC("fentry/eth_type_trans")
int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb, struct net_device *dev,
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c
new file mode 100644
index 000000000000..b9670e9a6e3d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+SEC("tc")
+int kfunc_destructive_test(void)
+{
+ bpf_kfunc_call_test_destructive();
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c
new file mode 100644
index 000000000000..a1963497f0bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+struct syscall_test_args {
+ __u8 data[16];
+ size_t size;
+};
+
+SEC("?syscall")
+int kfunc_syscall_test_fail(struct syscall_test_args *args)
+{
+ bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args) + 1);
+
+ return 0;
+}
+
+SEC("?syscall")
+int kfunc_syscall_test_null_fail(struct syscall_test_args *args)
+{
+	/* Must be called with args as a NULL pointer.
+	 * We deliberately do not check it, so that the verifier still
+	 * considers the pointer possibly non-NULL and allows loading
+	 * through it.
+	 *
+	 * Therefore the following cannot be added:
+	 *
+	 * if (args)
+	 *	return -22;
+	 */
+
+ bpf_kfunc_call_test_mem_len_pass1(args, sizeof(*args));
+
+ return 0;
+}
+
+SEC("?tc")
+int kfunc_call_test_get_mem_fail_rdonly(struct __sk_buff *skb)
+{
+ struct prog_test_ref_kfunc *pt;
+ unsigned long s = 0;
+ int *p = NULL;
+ int ret = 0;
+
+ pt = bpf_kfunc_call_test_acquire(&s);
+ if (pt) {
+ p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
+ if (p)
+ p[0] = 42; /* this is a read-only buffer, so -EACCES */
+ else
+ ret = -1;
+
+ bpf_kfunc_call_test_release(pt);
+ }
+ return ret;
+}
+
+SEC("?tc")
+int kfunc_call_test_get_mem_fail_use_after_free(struct __sk_buff *skb)
+{
+ struct prog_test_ref_kfunc *pt;
+ unsigned long s = 0;
+ int *p = NULL;
+ int ret = 0;
+
+ pt = bpf_kfunc_call_test_acquire(&s);
+ if (pt) {
+ p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int));
+ if (p) {
+ p[0] = 42;
+ ret = p[1]; /* 108 */
+ } else {
+ ret = -1;
+ }
+
+ bpf_kfunc_call_test_release(pt);
+ }
+ if (p)
+ ret = p[0]; /* p is not valid anymore */
+
+ return ret;
+}
+
+SEC("?tc")
+int kfunc_call_test_get_mem_fail_oob(struct __sk_buff *skb)
+{
+ struct prog_test_ref_kfunc *pt;
+ unsigned long s = 0;
+ int *p = NULL;
+ int ret = 0;
+
+ pt = bpf_kfunc_call_test_acquire(&s);
+ if (pt) {
+ p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
+ if (p)
+ ret = p[2 * sizeof(int)]; /* oob access, so -EACCES */
+ else
+ ret = -1;
+
+ bpf_kfunc_call_test_release(pt);
+ }
+ return ret;
+}
+
+int not_const_size = 2 * sizeof(int);
+
+SEC("?tc")
+int kfunc_call_test_get_mem_fail_not_const(struct __sk_buff *skb)
+{
+ struct prog_test_ref_kfunc *pt;
+ unsigned long s = 0;
+ int *p = NULL;
+ int ret = 0;
+
+ pt = bpf_kfunc_call_test_acquire(&s);
+ if (pt) {
+ p = bpf_kfunc_call_test_get_rdonly_mem(pt, not_const_size); /* non const size, -EINVAL */
+ if (p)
+ ret = p[0];
+ else
+ ret = -1;
+
+ bpf_kfunc_call_test_release(pt);
+ }
+ return ret;
+}
+
+SEC("?tc")
+int kfunc_call_test_mem_acquire_fail(struct __sk_buff *skb)
+{
+ struct prog_test_ref_kfunc *pt;
+ unsigned long s = 0;
+ int *p = NULL;
+ int ret = 0;
+
+ pt = bpf_kfunc_call_test_acquire(&s);
+ if (pt) {
+		/* this one is expected to fail because we are not acquiring a PTR_TO_BTF_ID (a struct pointer) */
+ p = bpf_kfunc_call_test_acq_rdonly_mem(pt, 2 * sizeof(int));
+ if (p)
+ ret = p[0];
+ else
+ ret = -1;
+
+ bpf_kfunc_call_int_mem_release(p);
+
+ bpf_kfunc_call_test_release(pt);
+ }
+ return ret;
+}
+
+SEC("?tc")
+int kfunc_call_test_pointer_arg_type_mismatch(struct __sk_buff *skb)
+{
+ bpf_kfunc_call_test_pass_ctx((void *)10);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_race.c b/tools/testing/selftests/bpf/progs/kfunc_call_race.c
new file mode 100644
index 000000000000..48f64827cd93
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_race.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+SEC("tc")
+int kfunc_call_fail(struct __sk_buff *ctx)
+{
+ bpf_testmod_test_mod_kfunc(0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
new file mode 100644
index 000000000000..8b86113a0126
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+SEC("tc")
+int kfunc_call_test4(struct __sk_buff *skb)
+{
+ struct bpf_sock *sk = skb->sk;
+ long tmp;
+
+ if (!sk)
+ return -1;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk)
+ return -1;
+
+ tmp = bpf_kfunc_call_test4(-3, -30, -200, -1000);
+ return (tmp >> 32) + tmp;
+}
+
+SEC("tc")
+int kfunc_call_test2(struct __sk_buff *skb)
+{
+ struct bpf_sock *sk = skb->sk;
+
+ if (!sk)
+ return -1;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk)
+ return -1;
+
+ return bpf_kfunc_call_test2((struct sock *)sk, 1, 2);
+}
+
+SEC("tc")
+int kfunc_call_test1(struct __sk_buff *skb)
+{
+ struct bpf_sock *sk = skb->sk;
+ __u64 a = 1ULL << 32;
+ __u32 ret;
+
+ if (!sk)
+ return -1;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk)
+ return -1;
+
+ a = bpf_kfunc_call_test1((struct sock *)sk, 1, a | 2, 3, a | 4);
+ ret = a >> 32; /* ret should be 2 */
+ ret += (__u32)a; /* ret should be 12 */
+
+ return ret;
+}
+
+SEC("tc")
+int kfunc_call_test_ref_btf_id(struct __sk_buff *skb)
+{
+ struct prog_test_ref_kfunc *pt;
+ unsigned long s = 0;
+ int ret = 0;
+
+ pt = bpf_kfunc_call_test_acquire(&s);
+ if (pt) {
+ if (pt->a != 42 || pt->b != 108)
+ ret = -1;
+ bpf_kfunc_call_test_release(pt);
+ }
+ return ret;
+}
+
+SEC("tc")
+int kfunc_call_test_pass(struct __sk_buff *skb)
+{
+ struct prog_test_pass1 p1 = {};
+ struct prog_test_pass2 p2 = {};
+ short a = 0;
+ __u64 b = 0;
+ long c = 0;
+ char d = 0;
+ int e = 0;
+
+ bpf_kfunc_call_test_pass_ctx(skb);
+ bpf_kfunc_call_test_pass1(&p1);
+ bpf_kfunc_call_test_pass2(&p2);
+
+ bpf_kfunc_call_test_mem_len_pass1(&a, sizeof(a));
+ bpf_kfunc_call_test_mem_len_pass1(&b, sizeof(b));
+ bpf_kfunc_call_test_mem_len_pass1(&c, sizeof(c));
+ bpf_kfunc_call_test_mem_len_pass1(&d, sizeof(d));
+ bpf_kfunc_call_test_mem_len_pass1(&e, sizeof(e));
+ bpf_kfunc_call_test_mem_len_fail2(&b, -1);
+
+ return 0;
+}
+
+struct syscall_test_args {
+ __u8 data[16];
+ size_t size;
+};
+
+SEC("syscall")
+int kfunc_syscall_test(struct syscall_test_args *args)
+{
+ const long size = args->size;
+
+ if (size > sizeof(args->data))
+ return -7; /* -E2BIG */
+
+ bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(args->data));
+ bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args));
+ bpf_kfunc_call_test_mem_len_pass1(&args->data, size);
+
+ return 0;
+}
+
+SEC("syscall")
+int kfunc_syscall_test_null(struct syscall_test_args *args)
+{
+ /* Must be called with args as a NULL pointer
+ * we do not check for it to have the verifier consider that
+ * the pointer might not be null, and so we can load it.
+ *
+ * So the following can not be added:
+ *
+ * if (args)
+ * return -22;
+ */
+
+ bpf_kfunc_call_test_mem_len_pass1(args, 0);
+
+ return 0;
+}
+
+SEC("tc")
+int kfunc_call_test_get_mem(struct __sk_buff *skb)
+{
+ struct prog_test_ref_kfunc *pt;
+ unsigned long s = 0;
+ int *p = NULL;
+ int ret = 0;
+
+ pt = bpf_kfunc_call_test_acquire(&s);
+ if (pt) {
+ p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int));
+ if (p) {
+ p[0] = 42;
+ ret = p[1]; /* 108 */
+ } else {
+ ret = -1;
+ }
+
+ if (ret >= 0) {
+ p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
+ if (p)
+ ret = p[0]; /* 42 */
+ else
+ ret = -1;
+ }
+
+ bpf_kfunc_call_test_release(pt);
+ }
+ return ret;
+}
+
+SEC("tc")
+int kfunc_call_test_static_unused_arg(struct __sk_buff *skb)
+{
+ u32 expected = 5, actual;
+
+ actual = bpf_kfunc_call_test_static_unused_arg(expected, 0xdeadbeef);
+ return actual != expected ? -1 : 0;
+}
+
+struct ctx_val {
+ struct bpf_testmod_ctx __kptr *ctx;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct ctx_val);
+} ctx_map SEC(".maps");
+
+SEC("tc")
+int kfunc_call_ctx(struct __sk_buff *skb)
+{
+ struct bpf_testmod_ctx *ctx;
+ int err = 0;
+
+ ctx = bpf_testmod_ctx_create(&err);
+ if (!ctx && !err)
+ err = -1;
+ if (ctx) {
+ int key = 0;
+ struct ctx_val *ctx_val = bpf_map_lookup_elem(&ctx_map, &key);
+
+ /* Transfer ctx to map to be freed via implicit dtor call
+ * on cleanup.
+ */
+ if (ctx_val)
+ ctx = bpf_kptr_xchg(&ctx_val->ctx, ctx);
+ if (ctx) {
+ bpf_testmod_ctx_release(ctx);
+ err = -1;
+ }
+ }
+ return err;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
new file mode 100644
index 000000000000..8e150e85b50d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+extern const int bpf_prog_active __ksym;
+int active_res = -1;
+int sk_state_res = -1;
+
+int __noinline f1(struct __sk_buff *skb)
+{
+ struct bpf_sock *sk = skb->sk;
+ int *active;
+
+ if (!sk)
+ return -1;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk)
+ return -1;
+
+ active = (int *)bpf_per_cpu_ptr(&bpf_prog_active,
+ bpf_get_smp_processor_id());
+ if (active)
+ active_res = *active;
+
+ sk_state_res = bpf_kfunc_call_test3((struct sock *)sk)->__sk_common.skc_state;
+
+ return (__u32)bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4);
+}
+
+SEC("tc")
+int kfunc_call_test1(struct __sk_buff *skb)
+{
+ return f1(skb);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfunc_module_order.c b/tools/testing/selftests/bpf/progs/kfunc_module_order.c
new file mode 100644
index 000000000000..76003d04c95f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_module_order.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+extern int bpf_test_modorder_retx(void) __ksym;
+extern int bpf_test_modorder_rety(void) __ksym;
+
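+/* Call the two bpf_test_modorder_* kfuncs in both orders and check their
+ * return values, verifying that kfunc resolution does not depend on the
+ * order in which the providing test modules were loaded.
+ */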
+SEC("classifier")
+int call_kfunc_xy(struct __sk_buff *skb)
+{
+ int ret1, ret2;
+
+ ret1 = bpf_test_modorder_retx();
+ ret2 = bpf_test_modorder_rety();
+
+ return ret1 == 'x' && ret2 == 'y' ? 0 : -1;
+}
+
+SEC("classifier")
+int call_kfunc_yx(struct __sk_buff *skb)
+{
+ int ret1, ret2;
+
+ ret1 = bpf_test_modorder_rety();
+ ret2 = bpf_test_modorder_retx();
+
+ return ret1 == 'y' && ret2 == 'x' ? 0 : -1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
new file mode 100644
index 000000000000..b9c8f9457492
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define SLAB_NAME_MAX 32
+
+struct kmem_cache_result {
+ char name[SLAB_NAME_MAX];
+ long obj_size;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(void *));
+ __uint(value_size, SLAB_NAME_MAX);
+ __uint(max_entries, 1);
+} slab_hash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(struct kmem_cache_result));
+ __uint(max_entries, 1024);
+} slab_result SEC(".maps");
+
+extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym;
+
+/* Result, will be checked by userspace */
+int task_struct_found;
+int kmem_cache_seen;
+int open_coded_seen;
+
+SEC("iter/kmem_cache")
+int slab_info_collector(struct bpf_iter__kmem_cache *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct kmem_cache *s = ctx->s;
+ struct kmem_cache_result *r;
+ int idx;
+
+ if (s) {
+		/* Print an entry to make sure the slab iterator implements
+		 * the seq interface properly; the output is also useful for
+		 * debugging.
+		 */
+ BPF_SEQ_PRINTF(seq, "%s: %u\n", s->name, s->size);
+
+ idx = kmem_cache_seen;
+ r = bpf_map_lookup_elem(&slab_result, &idx);
+ if (r == NULL)
+ return 0;
+
+ kmem_cache_seen++;
+
+ /* Save name and size to match /proc/slabinfo */
+ bpf_probe_read_kernel_str(r->name, sizeof(r->name), s->name);
+ r->obj_size = s->size;
+
+ if (!bpf_strncmp(r->name, 11, "task_struct"))
+ bpf_map_update_elem(&slab_hash, &s, r->name, BPF_NOEXIST);
+ }
+
+ return 0;
+}
+
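+/* Triggered via the bpf_test_finish tracepoint: resolve the slab cache
+ * backing the current task and verify the iterator above recorded it under
+ * the "task_struct" name.
+ */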
+SEC("raw_tp/bpf_test_finish")
+int BPF_PROG(check_task_struct)
+{
+ u64 curr = bpf_get_current_task();
+ struct kmem_cache *s;
+ char *name;
+
+ s = bpf_get_kmem_cache(curr);
+ if (s == NULL) {
+ task_struct_found = -1;
+ return 0;
+ }
+ name = bpf_map_lookup_elem(&slab_hash, &s);
+ if (name && !bpf_strncmp(name, 11, "task_struct"))
+ task_struct_found = 1;
+ else
+ task_struct_found = -2;
+ return 0;
+}
+
+SEC("syscall")
+int open_coded_iter(const void *ctx)
+{
+ struct kmem_cache *s;
+
+ bpf_for_each(kmem_cache, s) {
+ struct kmem_cache_result *r;
+
+ r = bpf_map_lookup_elem(&slab_result, &open_coded_seen);
+ if (!r)
+ break;
+
+ if (r->obj_size != s->size)
+ break;
+
+ open_coded_seen++;
+ }
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi.c b/tools/testing/selftests/bpf/progs/kprobe_multi.c
new file mode 100644
index 000000000000..9e1ca8e34913
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+
+char _license[] SEC("license") = "GPL";
+
+extern const void bpf_fentry_test1 __ksym;
+extern const void bpf_fentry_test2 __ksym;
+extern const void bpf_fentry_test3 __ksym;
+extern const void bpf_fentry_test4 __ksym;
+extern const void bpf_fentry_test5 __ksym;
+extern const void bpf_fentry_test6 __ksym;
+extern const void bpf_fentry_test7 __ksym;
+extern const void bpf_fentry_test8 __ksym;
+
+int pid = 0;
+bool test_cookie = false;
+
+__u64 kprobe_test1_result = 0;
+__u64 kprobe_test2_result = 0;
+__u64 kprobe_test3_result = 0;
+__u64 kprobe_test4_result = 0;
+__u64 kprobe_test5_result = 0;
+__u64 kprobe_test6_result = 0;
+__u64 kprobe_test7_result = 0;
+__u64 kprobe_test8_result = 0;
+
+__u64 kretprobe_test1_result = 0;
+__u64 kretprobe_test2_result = 0;
+__u64 kretprobe_test3_result = 0;
+__u64 kretprobe_test4_result = 0;
+__u64 kretprobe_test5_result = 0;
+__u64 kretprobe_test6_result = 0;
+__u64 kretprobe_test7_result = 0;
+__u64 kretprobe_test8_result = 0;
+
+static void kprobe_multi_check(void *ctx, bool is_return)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return;
+
+ __u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0;
+ __u64 addr = bpf_get_func_ip(ctx);
+
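+/* Record a hit for the matching bpf_fentry_test* function; when cookie
+ * testing is enabled, also require the attach cookie to match the value
+ * installed by the userspace part of the test.
+ */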
+#define SET(__var, __addr, __cookie) ({ \
+ if (((const void *) addr == __addr) && \
+ (!test_cookie || (cookie == __cookie))) \
+ __var = 1; \
+})
+
+ if (is_return) {
+ SET(kretprobe_test1_result, &bpf_fentry_test1, 8);
+ SET(kretprobe_test2_result, &bpf_fentry_test2, 2);
+ SET(kretprobe_test3_result, &bpf_fentry_test3, 7);
+ SET(kretprobe_test4_result, &bpf_fentry_test4, 6);
+ SET(kretprobe_test5_result, &bpf_fentry_test5, 5);
+ SET(kretprobe_test6_result, &bpf_fentry_test6, 4);
+ SET(kretprobe_test7_result, &bpf_fentry_test7, 3);
+ SET(kretprobe_test8_result, &bpf_fentry_test8, 1);
+ } else {
+ SET(kprobe_test1_result, &bpf_fentry_test1, 1);
+ SET(kprobe_test2_result, &bpf_fentry_test2, 7);
+ SET(kprobe_test3_result, &bpf_fentry_test3, 2);
+ SET(kprobe_test4_result, &bpf_fentry_test4, 3);
+ SET(kprobe_test5_result, &bpf_fentry_test5, 4);
+ SET(kprobe_test6_result, &bpf_fentry_test6, 5);
+ SET(kprobe_test7_result, &bpf_fentry_test7, 6);
+ SET(kprobe_test8_result, &bpf_fentry_test8, 8);
+ }
+
+#undef SET
+}
+
+/*
+ * No tests in here, just to trigger 'bpf_fentry_test*'
+ * through tracing test_run
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe.multi/bpf_fentry_tes??")
+int test_kprobe(struct pt_regs *ctx)
+{
+ kprobe_multi_check(ctx, false);
+ return 0;
+}
+
+SEC("kretprobe.multi/bpf_fentry_test*")
+int test_kretprobe(struct pt_regs *ctx)
+{
+ kprobe_multi_check(ctx, true);
+ return 0;
+}
+
+SEC("kprobe.multi")
+int test_kprobe_manual(struct pt_regs *ctx)
+{
+ kprobe_multi_check(ctx, false);
+ return 0;
+}
+
+SEC("kretprobe.multi")
+int test_kretprobe_manual(struct pt_regs *ctx)
+{
+ kprobe_multi_check(ctx, true);
+ return 0;
+}
+
+extern const void bpf_testmod_fentry_test1 __ksym;
+extern const void bpf_testmod_fentry_test2 __ksym;
+extern const void bpf_testmod_fentry_test3 __ksym;
+
+__u64 kprobe_testmod_test1_result = 0;
+__u64 kprobe_testmod_test2_result = 0;
+__u64 kprobe_testmod_test3_result = 0;
+
+__u64 kretprobe_testmod_test1_result = 0;
+__u64 kretprobe_testmod_test2_result = 0;
+__u64 kretprobe_testmod_test3_result = 0;
+
+static void kprobe_multi_testmod_check(void *ctx, bool is_return)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return;
+
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ if (is_return) {
+ if ((const void *) addr == &bpf_testmod_fentry_test1)
+ kretprobe_testmod_test1_result = 1;
+ if ((const void *) addr == &bpf_testmod_fentry_test2)
+ kretprobe_testmod_test2_result = 1;
+ if ((const void *) addr == &bpf_testmod_fentry_test3)
+ kretprobe_testmod_test3_result = 1;
+ } else {
+ if ((const void *) addr == &bpf_testmod_fentry_test1)
+ kprobe_testmod_test1_result = 1;
+ if ((const void *) addr == &bpf_testmod_fentry_test2)
+ kprobe_testmod_test2_result = 1;
+ if ((const void *) addr == &bpf_testmod_fentry_test3)
+ kprobe_testmod_test3_result = 1;
+ }
+}
+
+SEC("kprobe.multi")
+int test_kprobe_testmod(struct pt_regs *ctx)
+{
+ kprobe_multi_testmod_check(ctx, false);
+ return 0;
+}
+
+SEC("kretprobe.multi")
+int test_kretprobe_testmod(struct pt_regs *ctx)
+{
+ kprobe_multi_testmod_check(ctx, true);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_empty.c b/tools/testing/selftests/bpf/progs/kprobe_multi_empty.c
new file mode 100644
index 000000000000..e76e499aca39
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_empty.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("kprobe.multi/")
+int test_kprobe_empty(struct pt_regs *ctx)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_override.c b/tools/testing/selftests/bpf/progs/kprobe_multi_override.c
new file mode 100644
index 000000000000..28f8487c9059
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_override.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("kprobe.multi")
+int test_override(struct pt_regs *ctx)
+{
+ bpf_override_return(ctx, 123);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_session.c b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
new file mode 100644
index 000000000000..bd8b7fb7061e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+extern const void bpf_fentry_test1 __ksym;
+extern const void bpf_fentry_test2 __ksym;
+extern const void bpf_fentry_test3 __ksym;
+extern const void bpf_fentry_test4 __ksym;
+extern const void bpf_fentry_test5 __ksym;
+extern const void bpf_fentry_test6 __ksym;
+extern const void bpf_fentry_test7 __ksym;
+extern const void bpf_fentry_test8 __ksym;
+
+int pid = 0;
+
+__u64 kprobe_session_result[8];
+
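+/* Count entry and return hits per bpf_fentry_test* function, indexed by the
+ * position of the resolved probe address in the kfuncs[] table below.
+ */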
+static int session_check(void *ctx)
+{
+ unsigned int i;
+ __u64 addr;
+ const void *kfuncs[] = {
+ &bpf_fentry_test1,
+ &bpf_fentry_test2,
+ &bpf_fentry_test3,
+ &bpf_fentry_test4,
+ &bpf_fentry_test5,
+ &bpf_fentry_test6,
+ &bpf_fentry_test7,
+ &bpf_fentry_test8,
+ };
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 1;
+
+ addr = bpf_get_func_ip(ctx);
+
+ for (i = 0; i < ARRAY_SIZE(kfuncs); i++) {
+ if (kfuncs[i] == (void *) addr) {
+ kprobe_session_result[i]++;
+ break;
+ }
+ }
+
+	/*
+	 * For bpf_fentry_test[5-8], return non-zero so that the return
+	 * probe is not installed and executed.
+	 */
+ if (((const void *) addr == &bpf_fentry_test5) ||
+ ((const void *) addr == &bpf_fentry_test6) ||
+ ((const void *) addr == &bpf_fentry_test7) ||
+ ((const void *) addr == &bpf_fentry_test8))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * No tests in here, just to trigger 'bpf_fentry_test*'
+ * through tracing test_run
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe.session/bpf_fentry_test*")
+int test_kprobe(struct pt_regs *ctx)
+{
+ return session_check(ctx);
+}
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c b/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c
new file mode 100644
index 000000000000..0835b5edf685
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+
+__u64 test_kprobe_1_result = 0;
+__u64 test_kprobe_2_result = 0;
+__u64 test_kprobe_3_result = 0;
+
+/*
+ * No tests in here, just to trigger 'bpf_fentry_test*'
+ * through tracing test_run
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
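+/* On entry, store the expected value in the session cookie; on return,
+ * verify it is still there and record the outcome for userspace to check.
+ */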
+static int check_cookie(__u64 val, __u64 *result)
+{
+ __u64 *cookie;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 1;
+
+ cookie = bpf_session_cookie();
+
+ if (bpf_session_is_return())
+ *result = *cookie == val ? val : 0;
+ else
+ *cookie = val;
+ return 0;
+}
+
+SEC("kprobe.session/bpf_fentry_test1")
+int test_kprobe_1(struct pt_regs *ctx)
+{
+ return check_cookie(1, &test_kprobe_1_result);
+}
+
+SEC("kprobe.session/bpf_fentry_test1")
+int test_kprobe_2(struct pt_regs *ctx)
+{
+ return check_cookie(2, &test_kprobe_2_result);
+}
+
+SEC("kprobe.session/bpf_fentry_test1")
+int test_kprobe_3(struct pt_regs *ctx)
+{
+ return check_cookie(3, &test_kprobe_3_result);
+}
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c b/tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c
new file mode 100644
index 000000000000..288577e81deb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/usdt.bpf.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+
+SEC("kprobe.session")
+__success
+int kprobe_session_return_0(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe.session")
+__success
+int kprobe_session_return_1(struct pt_regs *ctx)
+{
+ return 1;
+}
+
+SEC("kprobe.session")
+__failure
+__msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]")
+int kprobe_session_return_2(struct pt_regs *ctx)
+{
+ return 2;
+}
diff --git a/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c
new file mode 100644
index 000000000000..2414ac20b6d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <linux/types.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct bin_data {
+ char blob[32];
+};
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+private(kptr) struct bin_data __kptr * ptr;
+
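+/* Exchange the global kptr with NULL and drop whatever object was stored
+ * there. Written in inline asm so the instruction sequence around the
+ * bpf_kptr_xchg() call stays predictable for the userspace check that the
+ * call gets inlined.
+ */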
+SEC("tc")
+__naked int kptr_xchg_inline(void)
+{
+ asm volatile (
+ "r1 = %[ptr] ll;"
+ "r2 = 0;"
+ "call %[bpf_kptr_xchg];"
+ "if r0 == 0 goto 1f;"
+ "r1 = r0;"
+ "r2 = 0;"
+ "call %[bpf_obj_drop_impl];"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_addr(ptr),
+ __imm(bpf_kptr_xchg),
+ __imm(bpf_obj_drop_impl)
+ : __clobber_all
+ );
+}
+
+/* BTF FUNC records are not generated for kfuncs referenced
+ * from inline assembly. These records are necessary for
+ * libbpf to link the program. The function below is a hack
+ * to ensure that BTF FUNC records are generated.
+ */
+void __btf_root(void)
+{
+ bpf_obj_drop(NULL);
+}
diff --git a/tools/testing/selftests/bpf/progs/ksym_race.c b/tools/testing/selftests/bpf/progs/ksym_race.c
new file mode 100644
index 000000000000..def97f2fed90
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/ksym_race.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+extern int bpf_testmod_ksym_percpu __ksym;
+
+SEC("tc")
+int ksym_fail(struct __sk_buff *ctx)
+{
+ return *(int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs1.c b/tools/testing/selftests/bpf/progs/linked_funcs1.c
new file mode 100644
index 000000000000..049a1f78de3f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_funcs1.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+/* weak and shared between two files */
+const volatile __u32 my_tid __weak;
+long syscall_id __weak;
+
+int output_val1;
+int output_ctx1;
+int output_weak1;
+
+/* same "subprog" name in all files, but it's ok because they all are static */
+static __noinline int subprog(int x)
+{
+ /* but different formula */
+ return x * 1;
+}
+
+/* Global functions can't be void */
+int set_output_val1(int x)
+{
+ output_val1 = x + subprog(x);
+ return x;
+}
+
+/* This function can't be verified as global, as it assumes raw_tp/sys_enter
+ * context and accesses syscall id (second argument). So we mark it as
+ * __hidden, so that libbpf will mark it as static in the final object file,
+ * right before verifying it in the kernel.
+ *
+ * But we don't mark it as __hidden here, rather at extern site. __hidden is
+ * "contaminating" visibility, so it will get propagated from either extern or
+ * actual definition (including from the losing __weak definition).
+ */
+void set_output_ctx1(__u64 *ctx)
+{
+ output_ctx1 = ctx[1]; /* long id, same as in BPF_PROG below */
+}
+
+/* this weak instance should win because it's the first one */
+__weak int set_output_weak(int x)
+{
+ static volatile int whatever;
+
+ /* make sure we use CO-RE relocations in a weak function, this used to
+ * cause problems for BPF static linker
+ */
+ whatever = bpf_core_type_size(struct task_struct);
+ __sink(whatever);
+
+ output_weak1 = x;
+ return x;
+}
+
+extern int set_output_val2(int x);
+
+/* here we'll force set_output_ctx2() to be __hidden in the final obj file */
+__hidden extern void set_output_ctx2(__u64 *ctx);
+
+void *bpf_cast_to_kern_ctx(void *obj) __ksym;
+
+SEC("?raw_tp/sys_enter")
+int BPF_PROG(handler1, struct pt_regs *regs, long id)
+{
+ static volatile int whatever;
+
+ if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
+ return 0;
+
+ /* make sure we have CO-RE relocations in main program */
+ whatever = bpf_core_type_size(struct task_struct);
+ __sink(whatever);
+
+ set_output_val2(1000);
+ set_output_ctx2(ctx); /* ctx definition is hidden in BPF_PROG macro */
+
+ /* keep input value the same across both files to avoid dependency on
+ * handler call order; differentiate by output_weak1 vs output_weak2.
+ */
+ set_output_weak(42);
+
+ return 0;
+}
+
+/* Generate BTF FUNC record and test linking with duplicate extern functions */
+void kfunc_gen1(void)
+{
+ bpf_cast_to_kern_ctx(0);
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs2.c b/tools/testing/selftests/bpf/progs/linked_funcs2.c
new file mode 100644
index 000000000000..96850759fd8d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_funcs2.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+/* weak and shared between both files */
+const volatile int my_tid __weak;
+long syscall_id __weak;
+
+int output_val2;
+int output_ctx2;
+int output_weak2; /* should stay zero */
+
+/* same "subprog" name in all files, but it's ok because they all are static */
+static __noinline int subprog(int x)
+{
+ /* but different formula */
+ return x * 2;
+}
+
+/* Global functions can't be void */
+int set_output_val2(int x)
+{
+ output_val2 = 2 * x + 2 * subprog(x);
+ return 2 * x;
+}
+
+/* This function can't be verified as global, as it assumes raw_tp/sys_enter
+ * context and accesses syscall id (second argument). So we mark it as
+ * __hidden, so that libbpf will mark it as static in the final object file,
+ * right before verifying it in the kernel.
+ *
+ * But we don't mark it as __hidden here, rather at extern site. __hidden is
+ * "contaminating" visibility, so it will get propagated from either extern or
+ * actual definition (including from the losing __weak definition).
+ */
+void set_output_ctx2(__u64 *ctx)
+{
+ output_ctx2 = ctx[1]; /* long id, same as in BPF_PROG below */
+}
+
+/* this weak instance should lose, because it will be processed second */
+__weak int set_output_weak(int x)
+{
+ static volatile int whatever;
+
+ /* make sure we use CO-RE relocations in a weak function, this used to
+ * cause problems for BPF static linker
+ */
+ whatever = 2 * bpf_core_type_size(struct task_struct);
+ __sink(whatever);
+
+ output_weak2 = x;
+ return 2 * x;
+}
+
+extern int set_output_val1(int x);
+
+/* here we'll force set_output_ctx1() to be __hidden in the final obj file */
+__hidden extern void set_output_ctx1(__u64 *ctx);
+
+void *bpf_cast_to_kern_ctx(void *obj) __ksym;
+
+SEC("?raw_tp/sys_enter")
+int BPF_PROG(handler2, struct pt_regs *regs, long id)
+{
+ static volatile int whatever;
+
+ if (my_tid != (s32)bpf_get_current_pid_tgid() || id != syscall_id)
+ return 0;
+
+ /* make sure we have CO-RE relocations in main program */
+ whatever = bpf_core_type_size(struct task_struct);
+ __sink(whatever);
+
+ set_output_val1(2000);
+ set_output_ctx1(ctx); /* ctx definition is hidden in BPF_PROG macro */
+
+ /* keep input value the same across both files to avoid dependency on
+ * handler call order; differentiate by output_weak1 vs output_weak2.
+ */
+ set_output_weak(42);
+
+ return 0;
+}
+
+/* Generate BTF FUNC record and test linking with duplicate extern functions */
+void kfunc_gen2(void)
+{
+ bpf_cast_to_kern_ctx(0);
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c
new file mode 100644
index 000000000000..421f40835acd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_list.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+#include "linked_list.h"
+
+struct head_nested_inner {
+ struct bpf_spin_lock lock;
+ struct bpf_list_head head __contains(foo, node2);
+};
+
+struct head_nested {
+ int dummy;
+ struct head_nested_inner inner;
+};
+
+private(C) struct bpf_spin_lock glock_c;
+private(C) struct bpf_list_head ghead_array[2] __contains(foo, node2);
+private(C) struct bpf_list_head ghead_array_one[1] __contains(foo, node2);
+
+private(D) struct head_nested ghead_nested;
+
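+/* Each helper below returns 0 on success and a distinct non-zero code for
+ * the first check that fails, so the userspace side can pinpoint which step
+ * went wrong.
+ */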
+static __always_inline
+int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
+{
+ struct bpf_list_node *n;
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 2;
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+	/* Must be called with args as a NULL pointer.
+	 * We deliberately do not check it, so that the verifier still
+	 * considers the pointer possibly non-NULL and allows loading
+	 * through it.
+	 *
+	 * Therefore the following cannot be added:
+	 *
+	 * if (args)
+	 *	return -22;
+	 */
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node2));
+ bpf_obj_drop(f);
+ return 4;
+ }
+
+
+ bpf_spin_lock(lock);
+ f->data = 42;
+ bpf_list_push_front(head, &f->node2);
+ bpf_spin_unlock(lock);
+ if (leave_in_map)
+ return 0;
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_back(head);
+ bpf_spin_unlock(lock);
+ if (!n)
+ return 5;
+ f = container_of(n, struct foo, node2);
+ if (f->data != 42) {
+ bpf_obj_drop(f);
+ return 6;
+ }
+
+ bpf_spin_lock(lock);
+ f->data = 13;
+ bpf_list_push_front(head, &f->node2);
+ bpf_spin_unlock(lock);
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (!n)
+ return 7;
+ f = container_of(n, struct foo, node2);
+ if (f->data != 13) {
+ bpf_obj_drop(f);
+ return 8;
+ }
+ bpf_obj_drop(f);
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node2));
+ return 9;
+ }
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_back(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node2));
+ return 10;
+ }
+ return 0;
+}
+
+
+static __always_inline
+int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
+{
+ struct bpf_list_node *n;
+ struct foo *f[200], *pf;
+ int i;
+
+	/* The loop following this check adds nodes two at a time in order
+	 * to validate the release_on_unlock logic for multiple nodes.
+	 */
+ if (ARRAY_SIZE(f) % 2)
+ return 10;
+
+ for (i = 0; i < ARRAY_SIZE(f); i += 2) {
+ f[i] = bpf_obj_new(typeof(**f));
+ if (!f[i])
+ return 2;
+ f[i]->data = i;
+
+ f[i + 1] = bpf_obj_new(typeof(**f));
+ if (!f[i + 1]) {
+ bpf_obj_drop(f[i]);
+ return 9;
+ }
+ f[i + 1]->data = i + 1;
+
+ bpf_spin_lock(lock);
+ bpf_list_push_front(head, &f[i]->node2);
+ bpf_list_push_front(head, &f[i + 1]->node2);
+ bpf_spin_unlock(lock);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(f); i++) {
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (!n)
+ return 3;
+ pf = container_of(n, struct foo, node2);
+ if (pf->data != (ARRAY_SIZE(f) - i - 1)) {
+ bpf_obj_drop(pf);
+ return 4;
+ }
+ bpf_spin_lock(lock);
+ bpf_list_push_back(head, &pf->node2);
+ bpf_spin_unlock(lock);
+ }
+
+ if (leave_in_map)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(f); i++) {
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_back(head);
+ bpf_spin_unlock(lock);
+ if (!n)
+ return 5;
+ pf = container_of(n, struct foo, node2);
+ if (pf->data != i) {
+ bpf_obj_drop(pf);
+ return 6;
+ }
+ bpf_obj_drop(pf);
+ }
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_back(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node2));
+ return 7;
+ }
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node2));
+ return 8;
+ }
+ return 0;
+}
+
+static __always_inline
+int list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
+{
+ struct bpf_list_node *n;
+ struct bar *ba[8], *b;
+ struct foo *f;
+ int i;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 2;
+ for (i = 0; i < ARRAY_SIZE(ba); i++) {
+ b = bpf_obj_new(typeof(*b));
+ if (!b) {
+ bpf_obj_drop(f);
+ return 3;
+ }
+ b->data = i;
+ bpf_spin_lock(&f->lock);
+ bpf_list_push_back(&f->head, &b->node);
+ bpf_spin_unlock(&f->lock);
+ }
+
+ bpf_spin_lock(lock);
+ f->data = 42;
+ bpf_list_push_front(head, &f->node2);
+ bpf_spin_unlock(lock);
+
+ if (leave_in_map)
+ return 0;
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (!n)
+ return 4;
+ f = container_of(n, struct foo, node2);
+ if (f->data != 42) {
+ bpf_obj_drop(f);
+ return 5;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ba); i++) {
+ bpf_spin_lock(&f->lock);
+ n = bpf_list_pop_front(&f->head);
+ bpf_spin_unlock(&f->lock);
+ if (!n) {
+ bpf_obj_drop(f);
+ return 6;
+ }
+ b = container_of(n, struct bar, node);
+ if (b->data != i) {
+ bpf_obj_drop(f);
+ bpf_obj_drop(b);
+ return 7;
+ }
+ bpf_obj_drop(b);
+ }
+ bpf_spin_lock(&f->lock);
+ n = bpf_list_pop_front(&f->head);
+ bpf_spin_unlock(&f->lock);
+ if (n) {
+ bpf_obj_drop(f);
+ bpf_obj_drop(container_of(n, struct bar, node));
+ return 8;
+ }
+ bpf_obj_drop(f);
+ return 0;
+}
+
+static __always_inline
+int test_list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head)
+{
+ int ret;
+
+ ret = list_push_pop(lock, head, false);
+ if (ret)
+ return ret;
+ return list_push_pop(lock, head, true);
+}
+
+static __always_inline
+int test_list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head)
+{
+ int ret;
+
+ ret = list_push_pop_multiple(lock, head, false);
+ if (ret)
+ return ret;
+ return list_push_pop_multiple(lock, head, true);
+}
+
+static __always_inline
+int test_list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head)
+{
+ int ret;
+
+ ret = list_in_list(lock, head, false);
+ if (ret)
+ return ret;
+ return list_in_list(lock, head, true);
+}
+
+SEC("tc")
+int map_list_push_pop(void *ctx)
+{
+ struct map_value *v;
+
+ v = bpf_map_lookup_elem(&array_map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_push_pop(&v->lock, &v->head);
+}
+
+SEC("tc")
+int inner_map_list_push_pop(void *ctx)
+{
+ struct map_value *v;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
+ if (!map)
+ return 1;
+ v = bpf_map_lookup_elem(map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_push_pop(&v->lock, &v->head);
+}
+
+SEC("tc")
+int global_list_push_pop(void *ctx)
+{
+ return test_list_push_pop(&glock, &ghead);
+}
+
+SEC("tc")
+int global_list_push_pop_nested(void *ctx)
+{
+ return test_list_push_pop(&ghead_nested.inner.lock, &ghead_nested.inner.head);
+}
+
+SEC("tc")
+int global_list_array_push_pop(void *ctx)
+{
+ int r;
+
+ r = test_list_push_pop(&glock_c, &ghead_array[0]);
+ if (r)
+ return r;
+
+ r = test_list_push_pop(&glock_c, &ghead_array[1]);
+ if (r)
+ return r;
+
+	/* An array with only one element is a special case: the verifier
+	 * treats it just like a plain bpf_list_head variable, not as an
+	 * array.
+	 */
+ return test_list_push_pop(&glock_c, &ghead_array_one[0]);
+}
+
+SEC("tc")
+int map_list_push_pop_multiple(void *ctx)
+{
+ struct map_value *v;
+
+ v = bpf_map_lookup_elem(&array_map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_push_pop_multiple(&v->lock, &v->head);
+}
+
+SEC("tc")
+int inner_map_list_push_pop_multiple(void *ctx)
+{
+ struct map_value *v;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
+ if (!map)
+ return 1;
+ v = bpf_map_lookup_elem(map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_push_pop_multiple(&v->lock, &v->head);
+}
+
+SEC("tc")
+int global_list_push_pop_multiple(void *ctx)
+{
+ int ret;
+
+ ret = list_push_pop_multiple(&glock, &ghead, false);
+ if (ret)
+ return ret;
+ return list_push_pop_multiple(&glock, &ghead, true);
+}
+
+SEC("tc")
+int map_list_in_list(void *ctx)
+{
+ struct map_value *v;
+
+ v = bpf_map_lookup_elem(&array_map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_in_list(&v->lock, &v->head);
+}
+
+SEC("tc")
+int inner_map_list_in_list(void *ctx)
+{
+ struct map_value *v;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
+ if (!map)
+ return 1;
+ v = bpf_map_lookup_elem(map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_in_list(&v->lock, &v->head);
+}
+
+SEC("tc")
+int global_list_in_list(void *ctx)
+{
+ return test_list_in_list(&glock, &ghead);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_list.h b/tools/testing/selftests/bpf/progs/linked_list.h
new file mode 100644
index 000000000000..c0f3609a7ffa
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_list.h
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef LINKED_LIST_H
+#define LINKED_LIST_H
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_experimental.h"
+
+struct bar {
+ struct bpf_list_node node;
+ int data;
+};
+
+struct foo {
+ struct bpf_list_node node;
+ struct bpf_list_head head __contains(bar, node);
+ struct bpf_spin_lock lock;
+ int data;
+ struct bpf_list_node node2;
+};
+
+struct map_value {
+ struct bpf_spin_lock lock;
+ int data;
+ struct bpf_list_head head __contains(foo, node2);
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+};
+
+struct array_map array_map SEC(".maps");
+struct array_map inner_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct array_map);
+} map_of_maps SEC(".maps") = {
+ .values = {
+ [0] = &inner_map,
+ },
+};
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_list_head ghead __contains(foo, node2);
+private(B) struct bpf_spin_lock glock2;
+
+#endif
diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c
new file mode 100644
index 000000000000..6438982b928b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+#include "linked_list.h"
+
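+/* Common prologue for the failure tests below: look up values from the
+ * outer and inner maps and allocate a few objects so each test can mix
+ * locks, list heads and nodes from different allocation domains.
+ */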
+#define INIT \
+ struct map_value *v, *v2, *iv, *iv2; \
+ struct foo *f, *f1, *f2; \
+ struct bar *b; \
+ void *map; \
+ \
+ map = bpf_map_lookup_elem(&map_of_maps, &(int){ 0 }); \
+ if (!map) \
+ return 0; \
+ v = bpf_map_lookup_elem(&array_map, &(int){ 0 }); \
+ if (!v) \
+ return 0; \
+ v2 = bpf_map_lookup_elem(&array_map, &(int){ 0 }); \
+ if (!v2) \
+ return 0; \
+ iv = bpf_map_lookup_elem(map, &(int){ 0 }); \
+ if (!iv) \
+ return 0; \
+ iv2 = bpf_map_lookup_elem(map, &(int){ 0 }); \
+ if (!iv2) \
+ return 0; \
+ f = bpf_obj_new(typeof(*f)); \
+ if (!f) \
+ return 0; \
+ f1 = f; \
+ f2 = bpf_obj_new(typeof(*f2)); \
+ if (!f2) { \
+ bpf_obj_drop(f1); \
+ return 0; \
+ } \
+ b = bpf_obj_new(typeof(*b)); \
+ if (!b) { \
+ bpf_obj_drop(f2); \
+ bpf_obj_drop(f1); \
+ return 0; \
+ }
+
+#define CHECK(test, op, hexpr) \
+ SEC("?tc") \
+ int test##_missing_lock_##op(void *ctx) \
+ { \
+ INIT; \
+ void (*p)(void *) = (void *)&bpf_list_##op; \
+ p(hexpr); \
+ return 0; \
+ }
+
+CHECK(kptr, pop_front, &f->head);
+CHECK(kptr, pop_back, &f->head);
+
+CHECK(global, pop_front, &ghead);
+CHECK(global, pop_back, &ghead);
+
+CHECK(map, pop_front, &v->head);
+CHECK(map, pop_back, &v->head);
+
+CHECK(inner_map, pop_front, &iv->head);
+CHECK(inner_map, pop_back, &iv->head);
+
+#undef CHECK
+
+#define CHECK(test, op, hexpr, nexpr) \
+ SEC("?tc") \
+ int test##_missing_lock_##op(void *ctx) \
+ { \
+ INIT; \
+ bpf_list_##op(hexpr, nexpr); \
+ return 0; \
+ }
+
+CHECK(kptr, push_front, &f->head, &b->node);
+CHECK(kptr, push_back, &f->head, &b->node);
+
+CHECK(global, push_front, &ghead, &f->node2);
+CHECK(global, push_back, &ghead, &f->node2);
+
+CHECK(map, push_front, &v->head, &f->node2);
+CHECK(map, push_back, &v->head, &f->node2);
+
+CHECK(inner_map, push_front, &iv->head, &f->node2);
+CHECK(inner_map, push_back, &iv->head, &f->node2);
+
+#undef CHECK
+
+#define CHECK(test, op, lexpr, hexpr) \
+ SEC("?tc") \
+ int test##_incorrect_lock_##op(void *ctx) \
+ { \
+ INIT; \
+ void (*p)(void *) = (void *)&bpf_list_##op; \
+ bpf_spin_lock(lexpr); \
+ p(hexpr); \
+ return 0; \
+ }
+
+#define CHECK_OP(op) \
+ CHECK(kptr_kptr, op, &f1->lock, &f2->head); \
+ CHECK(kptr_global, op, &f1->lock, &ghead); \
+ CHECK(kptr_map, op, &f1->lock, &v->head); \
+ CHECK(kptr_inner_map, op, &f1->lock, &iv->head); \
+ \
+ CHECK(global_global, op, &glock2, &ghead); \
+ CHECK(global_kptr, op, &glock, &f1->head); \
+ CHECK(global_map, op, &glock, &v->head); \
+ CHECK(global_inner_map, op, &glock, &iv->head); \
+ \
+ CHECK(map_map, op, &v->lock, &v2->head); \
+ CHECK(map_kptr, op, &v->lock, &f2->head); \
+ CHECK(map_global, op, &v->lock, &ghead); \
+ CHECK(map_inner_map, op, &v->lock, &iv->head); \
+ \
+ CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head); \
+ CHECK(inner_map_kptr, op, &iv->lock, &f2->head); \
+ CHECK(inner_map_global, op, &iv->lock, &ghead); \
+ CHECK(inner_map_map, op, &iv->lock, &v->head);
+
+CHECK_OP(pop_front);
+CHECK_OP(pop_back);
+
+#undef CHECK
+#undef CHECK_OP
+
+#define CHECK(test, op, lexpr, hexpr, nexpr) \
+ SEC("?tc") \
+ int test##_incorrect_lock_##op(void *ctx) \
+ { \
+ INIT; \
+ bpf_spin_lock(lexpr); \
+ bpf_list_##op(hexpr, nexpr); \
+ return 0; \
+ }
+
+#define CHECK_OP(op) \
+ CHECK(kptr_kptr, op, &f1->lock, &f2->head, &b->node); \
+ CHECK(kptr_global, op, &f1->lock, &ghead, &f->node2); \
+ CHECK(kptr_map, op, &f1->lock, &v->head, &f->node2); \
+ CHECK(kptr_inner_map, op, &f1->lock, &iv->head, &f->node2); \
+ \
+ CHECK(global_global, op, &glock2, &ghead, &f->node2); \
+ CHECK(global_kptr, op, &glock, &f1->head, &b->node); \
+ CHECK(global_map, op, &glock, &v->head, &f->node2); \
+ CHECK(global_inner_map, op, &glock, &iv->head, &f->node2); \
+ \
+ CHECK(map_map, op, &v->lock, &v2->head, &f->node2); \
+ CHECK(map_kptr, op, &v->lock, &f2->head, &b->node); \
+ CHECK(map_global, op, &v->lock, &ghead, &f->node2); \
+ CHECK(map_inner_map, op, &v->lock, &iv->head, &f->node2); \
+ \
+ CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head, &f->node2);\
+ CHECK(inner_map_kptr, op, &iv->lock, &f2->head, &b->node); \
+ CHECK(inner_map_global, op, &iv->lock, &ghead, &f->node2); \
+ CHECK(inner_map_map, op, &iv->lock, &v->head, &f->node2);
+
+CHECK_OP(push_front);
+CHECK_OP(push_back);
+
+#undef CHECK
+#undef CHECK_OP
+#undef INIT
+
+SEC("?kprobe/xyz")
+int map_compat_kprobe(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?kretprobe/xyz")
+int map_compat_kretprobe(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?tracepoint/xyz")
+int map_compat_tp(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?perf_event")
+int map_compat_perf(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?raw_tp/xyz")
+int map_compat_raw_tp(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?raw_tp.w/xyz")
+int map_compat_raw_tp_w(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?tc")
+int obj_type_id_oor(void *ctx)
+{
+ bpf_obj_new_impl(~0UL, NULL);
+ return 0;
+}
+
+SEC("?tc")
+int obj_new_no_composite(void *ctx)
+{
+ bpf_obj_new_impl(bpf_core_type_id_local(int), (void *)42);
+ return 0;
+}
+
+SEC("?tc")
+int obj_new_no_struct(void *ctx)
+{
+
+ bpf_obj_new(union { int data; unsigned udata; });
+ return 0;
+}
+
+SEC("?tc")
+int obj_drop_non_zero_off(void *ctx)
+{
+ void *f;
+
+ f = bpf_obj_new(struct foo);
+ if (!f)
+ return 0;
+ bpf_obj_drop(f+1);
+ return 0;
+}
+
+SEC("?tc")
+int new_null_ret(void *ctx)
+{
+ return bpf_obj_new(struct foo)->data;
+}
+
+SEC("?tc")
+int obj_new_acq(void *ctx)
+{
+ bpf_obj_new(struct foo);
+ return 0;
+}
+
+SEC("?tc")
+int use_after_drop(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_obj_drop(f);
+ return f->data;
+}
+
+SEC("?tc")
+int ptr_walk_scalar(void *ctx)
+{
+ struct test1 {
+ struct test2 {
+ struct test2 *next;
+ } *ptr;
+ } *p;
+
+ p = bpf_obj_new(typeof(*p));
+ if (!p)
+ return 0;
+ bpf_this_cpu_ptr(p->ptr);
+ return 0;
+}
+
+SEC("?tc")
+int direct_read_lock(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ return *(int *)&f->lock;
+}
+
+SEC("?tc")
+int direct_write_lock(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ *(int *)&f->lock = 0;
+ return 0;
+}
+
+SEC("?tc")
+int direct_read_head(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ return *(int *)&f->head;
+}
+
+SEC("?tc")
+int direct_write_head(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ *(int *)&f->head = 0;
+ return 0;
+}
+
+SEC("?tc")
+int direct_read_node(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ return *(int *)&f->node2;
+}
+
+SEC("?tc")
+int direct_write_node(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ *(int *)&f->node2 = 0;
+ return 0;
+}
+
+static __always_inline
+int use_after_unlock(bool push_front)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ f->data = 42;
+ if (push_front)
+ bpf_list_push_front(&ghead, &f->node2);
+ else
+ bpf_list_push_back(&ghead, &f->node2);
+ bpf_spin_unlock(&glock);
+
+ return f->data;
+}
+
+SEC("?tc")
+int use_after_unlock_push_front(void *ctx)
+{
+ return use_after_unlock(true);
+}
+
+SEC("?tc")
+int use_after_unlock_push_back(void *ctx)
+{
+ return use_after_unlock(false);
+}
+
+static __always_inline
+int list_double_add(bool push_front)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ if (push_front) {
+ bpf_list_push_front(&ghead, &f->node2);
+ bpf_list_push_front(&ghead, &f->node2);
+ } else {
+ bpf_list_push_back(&ghead, &f->node2);
+ bpf_list_push_back(&ghead, &f->node2);
+ }
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int double_push_front(void *ctx)
+{
+ return list_double_add(true);
+}
+
+SEC("?tc")
+int double_push_back(void *ctx)
+{
+ return list_double_add(false);
+}
+
+SEC("?tc")
+int no_node_value_type(void *ctx)
+{
+ void *p;
+
+ p = bpf_obj_new(struct { int data; });
+ if (!p)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(&ghead, p);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_value_type(void *ctx)
+{
+ struct bar *b;
+
+ b = bpf_obj_new(typeof(*b));
+ if (!b)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(&ghead, &b->node);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_node_var_off(struct __sk_buff *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(&ghead, (void *)&f->node2 + ctx->protocol);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_node_off1(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(&ghead, (void *)&f->node2 + 1);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_node_off2(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(&ghead, &f->node);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int no_head_type(void *ctx)
+{
+ void *p;
+
+ p = bpf_obj_new(typeof(struct { int data; }));
+ if (!p)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(p, NULL);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_head_var_off1(struct __sk_buff *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front((void *)&ghead + ctx->protocol, &f->node2);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_head_var_off2(struct __sk_buff *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front((void *)&f->head + ctx->protocol, &f->node2);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_head_off1(void *ctx)
+{
+ struct foo *f;
+ struct bar *b;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ b = bpf_obj_new(typeof(*b));
+ if (!b) {
+ bpf_obj_drop(f);
+ return 0;
+ }
+
+ bpf_spin_lock(&f->lock);
+ bpf_list_push_front((void *)&f->head + 1, &b->node);
+ bpf_spin_unlock(&f->lock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_head_off2(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+
+ bpf_spin_lock(&glock);
+ bpf_list_push_front((void *)&ghead + 1, &f->node2);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
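+/* Pop a node and misuse the returned bpf_list_node pointer, which lives at a
+ * non-zero offset inside its object, as a spin lock; this is expected to be
+ * rejected.
+ */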
+static __always_inline
+int pop_ptr_off(void *(*op)(void *head))
+{
+ struct {
+ struct bpf_list_head head __contains(foo, node2);
+ struct bpf_spin_lock lock;
+ } *p;
+ struct bpf_list_node *n;
+
+ p = bpf_obj_new(typeof(*p));
+ if (!p)
+ return 0;
+ bpf_spin_lock(&p->lock);
+ n = op(&p->head);
+ bpf_spin_unlock(&p->lock);
+
+ if (!n)
+ return 0;
+ bpf_spin_lock((void *)n);
+ return 0;
+}
+
+SEC("?tc")
+int pop_front_off(void *ctx)
+{
+ return pop_ptr_off((void *)bpf_list_pop_front);
+}
+
+SEC("?tc")
+int pop_back_off(void *ctx)
+{
+ return pop_ptr_off((void *)bpf_list_pop_back);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_list_peek.c b/tools/testing/selftests/bpf/progs/linked_list_peek.c
new file mode 100644
index 000000000000..264e81bfb287
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_list_peek.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct node_data {
+ struct bpf_list_node l;
+ int key;
+};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_list_head ghead __contains(node_data, l);
+
+#define list_entry(ptr, type, member) container_of(ptr, type, member)
+#define NR_NODES 16
+
+int zero = 0;
+
+SEC("syscall")
+__retval(0)
+long list_peek(void *ctx)
+{
+ struct bpf_list_node *l_n;
+ struct node_data *n;
+ int i, err = 0;
+
+ bpf_spin_lock(&glock);
+ l_n = bpf_list_front(&ghead);
+ bpf_spin_unlock(&glock);
+ if (l_n)
+ return __LINE__;
+
+ bpf_spin_lock(&glock);
+ l_n = bpf_list_back(&ghead);
+ bpf_spin_unlock(&glock);
+ if (l_n)
+ return __LINE__;
+
+ for (i = zero; i < NR_NODES && can_loop; i++) {
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return __LINE__;
+ n->key = i;
+ bpf_spin_lock(&glock);
+ bpf_list_push_back(&ghead, &n->l);
+ bpf_spin_unlock(&glock);
+ }
+
+ bpf_spin_lock(&glock);
+
+ l_n = bpf_list_front(&ghead);
+ if (!l_n) {
+ err = __LINE__;
+ goto done;
+ }
+
+ n = list_entry(l_n, struct node_data, l);
+ if (n->key != 0) {
+ err = __LINE__;
+ goto done;
+ }
+
+ l_n = bpf_list_back(&ghead);
+ if (!l_n) {
+ err = __LINE__;
+ goto done;
+ }
+
+ n = list_entry(l_n, struct node_data, l);
+ if (n->key != NR_NODES - 1) {
+ err = __LINE__;
+ goto done;
+ }
+
+done:
+ bpf_spin_unlock(&glock);
+ return err;
+}
+
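+/* Failure-mode checks: peek at the list with and without holding glock and
+ * match the expected verifier message in each case.
+ */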
+#define TEST_FB(op, dolock) \
+SEC("syscall") \
+__failure __msg(MSG) \
+long test_##op##_spinlock_##dolock(void *ctx) \
+{ \
+ struct bpf_list_node *l_n; \
+ __u64 jiffies = 0; \
+ \
+ if (dolock) \
+ bpf_spin_lock(&glock); \
+ l_n = bpf_list_##op(&ghead); \
+ if (l_n) \
+ jiffies = bpf_jiffies64(); \
+ if (dolock) \
+ bpf_spin_unlock(&glock); \
+ \
+ return !!jiffies; \
+}
+
+#define MSG "call bpf_list_{{(front|back).+}}; R0{{(_w)?}}=ptr_or_null_node_data(id={{[0-9]+}},non_own_ref"
+TEST_FB(front, true)
+TEST_FB(back, true)
+#undef MSG
+
+#define MSG "bpf_spin_lock at off=0 must be held for bpf_list_head"
+TEST_FB(front, false)
+TEST_FB(back, false)
+#undef MSG
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_maps1.c b/tools/testing/selftests/bpf/progs/linked_maps1.c
new file mode 100644
index 000000000000..00bf1ca95986
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_maps1.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct my_key { long x; };
+struct my_value { long x; };
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct my_key);
+ __type(value, struct my_value);
+ __uint(max_entries, 16);
+} map1 SEC(".maps");
+
+ /* Matches map2 definition in linked_maps2.c. Order of the attributes doesn't
+ * matter.
+ */
+typedef struct {
+ __uint(max_entries, 8);
+ __type(key, int);
+ __type(value, int);
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+} map2_t;
+
+extern map2_t map2 SEC(".maps");
+
+/* This should be the winning map definition, but we have no way of verifying
+ * that, so we just make sure that it links and works without errors
+ */
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 16);
+} map_weak __weak SEC(".maps");
+
+int output_first1;
+int output_second1;
+int output_weak1;
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handler_enter1)
+{
+ /* update values with key = 1 */
+ int key = 1, val = 1;
+ struct my_key key_struct = { .x = 1 };
+ struct my_value val_struct = { .x = 1000 };
+
+ bpf_map_update_elem(&map1, &key_struct, &val_struct, 0);
+ bpf_map_update_elem(&map2, &key, &val, 0);
+ bpf_map_update_elem(&map_weak, &key, &val, 0);
+
+ return 0;
+}
+
+SEC("raw_tp/sys_exit")
+int BPF_PROG(handler_exit1)
+{
+ /* lookup values with key = 2, set in another file */
+ int key = 2, *val;
+ struct my_key key_struct = { .x = 2 };
+ struct my_value *value_struct;
+
+ value_struct = bpf_map_lookup_elem(&map1, &key_struct);
+ if (value_struct)
+ output_first1 = value_struct->x;
+
+ val = bpf_map_lookup_elem(&map2, &key);
+ if (val)
+ output_second1 = *val;
+
+ val = bpf_map_lookup_elem(&map_weak, &key);
+ if (val)
+ output_weak1 = *val;
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_maps2.c b/tools/testing/selftests/bpf/progs/linked_maps2.c
new file mode 100644
index 000000000000..0693687474ed
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_maps2.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* modifiers and typedefs are ignored when comparing key/value types */
+typedef struct my_key { long x; } key_type;
+typedef struct my_value { long x; } value_type;
+
+extern struct {
+ __uint(max_entries, 16);
+ __type(key, key_type);
+ __type(value, value_type);
+ __uint(type, BPF_MAP_TYPE_HASH);
+} map1 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 8);
+} map2 SEC(".maps");
+
+/* this definition will lose, but it has to exactly match the winner */
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 16);
+} map_weak __weak SEC(".maps");
+
+int output_first2;
+int output_second2;
+int output_weak2;
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handler_enter2)
+{
+ /* update values with key = 2 */
+ int key = 2, val = 2;
+ key_type key_struct = { .x = 2 };
+ value_type val_struct = { .x = 2000 };
+
+ bpf_map_update_elem(&map1, &key_struct, &val_struct, 0);
+ bpf_map_update_elem(&map2, &key, &val, 0);
+ bpf_map_update_elem(&map_weak, &key, &val, 0);
+
+ return 0;
+}
+
+SEC("raw_tp/sys_exit")
+int BPF_PROG(handler_exit2)
+{
+ /* lookup values with key = 1, set in another file */
+ int key = 1, *val;
+ key_type key_struct = { .x = 1 };
+ value_type *value_struct;
+
+ value_struct = bpf_map_lookup_elem(&map1, &key_struct);
+ if (value_struct)
+ output_first2 = value_struct->x;
+
+ val = bpf_map_lookup_elem(&map2, &key);
+ if (val)
+ output_second2 = *val;
+
+ val = bpf_map_lookup_elem(&map_weak, &key);
+ if (val)
+ output_weak2 = *val;
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_vars1.c b/tools/testing/selftests/bpf/progs/linked_vars1.c
new file mode 100644
index 000000000000..ef9e9d0bb0ca
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_vars1.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+extern int LINUX_KERNEL_VERSION __kconfig;
+/* this weak extern will be strict due to the other file's strong extern */
+extern bool CONFIG_BPF_SYSCALL __kconfig __weak;
+extern const void bpf_link_fops __ksym __weak;
+
+int input_bss1;
+int input_data1 = 1;
+const volatile int input_rodata1 = 11;
+
+int input_bss_weak __weak;
+/* these two definitions should win */
+int input_data_weak __weak = 10;
+const volatile int input_rodata_weak __weak = 100;
+
+extern int input_bss2;
+extern int input_data2;
+extern const int input_rodata2;
+
+int output_bss1;
+int output_data1;
+int output_rodata1;
+
+long output_sink1;
+
+static __noinline int get_bss_res(void)
+{
+ /* just make sure all the relocations work against .text as well */
+ return input_bss1 + input_bss2 + input_bss_weak;
+}
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handler1)
+{
+ output_bss1 = get_bss_res();
+ output_data1 = input_data1 + input_data2 + input_data_weak;
+ output_rodata1 = input_rodata1 + input_rodata2 + input_rodata_weak;
+
+ /* make sure we actually use the above special externs, otherwise the
+ * compiler will optimize them out
+ */
+ output_sink1 = LINUX_KERNEL_VERSION
+ + CONFIG_BPF_SYSCALL
+ + (long)&bpf_link_fops;
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_vars2.c b/tools/testing/selftests/bpf/progs/linked_vars2.c
new file mode 100644
index 000000000000..e4f5bd388a3c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_vars2.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+extern int LINUX_KERNEL_VERSION __kconfig;
+/* when an extern is defined as both strong and weak, the resulting symbol will be strong */
+extern bool CONFIG_BPF_SYSCALL __kconfig;
+extern const void __start_BTF __ksym;
+
+int input_bss2;
+int input_data2 = 2;
+const volatile int input_rodata2 = 22;
+
+int input_bss_weak __weak;
+/* these two weak variables should lose */
+int input_data_weak __weak = 20;
+const volatile int input_rodata_weak __weak = 200;
+
+extern int input_bss1;
+extern int input_data1;
+extern const int input_rodata1;
+
+int output_bss2;
+int output_data2;
+int output_rodata2;
+
+int output_sink2;
+
+static __noinline int get_data_res(void)
+{
+ /* just make sure all the relocations work against .text as well */
+ return input_data1 + input_data2 + input_data_weak;
+}
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handler2)
+{
+ output_bss2 = input_bss1 + input_bss2 + input_bss_weak;
+ output_data2 = get_data_res();
+ output_rodata2 = input_rodata1 + input_rodata2 + input_rodata_weak;
+
+ /* make sure we actually use the above special externs, otherwise the
+ * compiler will optimize them out
+ */
+ output_sink2 = LINUX_KERNEL_VERSION
+ + CONFIG_BPF_SYSCALL
+ + (long)&__start_BTF;
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
new file mode 100644
index 000000000000..d736506a4c80
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "../bpf_experimental.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+struct plain_local;
+
+struct node_data {
+ long key;
+ long data;
+ struct plain_local __kptr * stashed_in_local_kptr;
+ struct bpf_rb_node node;
+};
+
+struct refcounted_node {
+ long data;
+ struct bpf_rb_node rb_node;
+ struct bpf_refcount refcount;
+};
+
+struct stash {
+ struct bpf_spin_lock l;
+ struct refcounted_node __kptr *stashed;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct stash);
+ __uint(max_entries, 10);
+} refcounted_node_stash SEC(".maps");
+
+struct plain_local {
+ long key;
+ long data;
+};
+
+struct local_with_root {
+ long key;
+ struct bpf_spin_lock l;
+ struct bpf_rb_root r __contains(node_data, node);
+};
+
+struct map_value {
+ struct prog_test_ref_kfunc *not_kptr;
+ struct prog_test_ref_kfunc __kptr *val;
+ struct node_data __kptr *node;
+ struct plain_local __kptr *plain;
+ struct local_with_root __kptr *local_root;
+};
+
+/* This is necessary so that LLVM generates BTF for the node_data struct.
+ * If it's not included, a fwd reference for node_data will be generated but
+ * no struct. Example BTF of "node" field in map_value when not included:
+ *
+ * [10] PTR '(anon)' type_id=35
+ * [34] FWD 'node_data' fwd_kind=struct
+ * [35] TYPE_TAG 'kptr_ref' type_id=34
+ *
+ * (with no node_data struct defined)
+ * Had to do the same w/ bpf_kfunc_call_test_release below
+ */
+struct node_data *just_here_because_btf_bug;
+struct refcounted_node *just_here_because_btf_bug2;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 2);
+} some_nodes SEC(".maps");
+
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+
+ return node_a->key < node_b->key;
+}
+
+static int create_and_stash(int idx, int val)
+{
+ struct plain_local *inner_local_kptr;
+ struct map_value *mapval;
+ struct node_data *res;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ inner_local_kptr = bpf_obj_new(typeof(*inner_local_kptr));
+ if (!inner_local_kptr)
+ return 2;
+
+ res = bpf_obj_new(typeof(*res));
+ if (!res) {
+ bpf_obj_drop(inner_local_kptr);
+ return 3;
+ }
+ res->key = val;
+
+ inner_local_kptr = bpf_kptr_xchg(&res->stashed_in_local_kptr, inner_local_kptr);
+ if (inner_local_kptr) {
+ /* Should never happen, we just obj_new'd res */
+ bpf_obj_drop(inner_local_kptr);
+ bpf_obj_drop(res);
+ return 4;
+ }
+
+ res = bpf_kptr_xchg(&mapval->node, res);
+ if (res)
+ bpf_obj_drop(res);
+ return 0;
+}
+
+SEC("tc")
+long stash_rb_nodes(void *ctx)
+{
+ return create_and_stash(0, 41) ?: create_and_stash(1, 42);
+}
+
+SEC("tc")
+long stash_plain(void *ctx)
+{
+ struct map_value *mapval;
+ struct plain_local *res;
+ int idx = 0;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ res = bpf_obj_new(typeof(*res));
+ if (!res)
+ return 1;
+ res->key = 41;
+
+ res = bpf_kptr_xchg(&mapval->plain, res);
+ if (res)
+ bpf_obj_drop(res);
+ return 0;
+}
+
+SEC("tc")
+long stash_local_with_root(void *ctx)
+{
+ struct local_with_root *res;
+ struct map_value *mapval;
+ struct node_data *n;
+ int idx = 0;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ res = bpf_obj_new(typeof(*res));
+ if (!res)
+ return 2;
+ res->key = 41;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n) {
+ bpf_obj_drop(res);
+ return 3;
+ }
+
+ bpf_spin_lock(&res->l);
+ bpf_rbtree_add(&res->r, &n->node, less);
+ bpf_spin_unlock(&res->l);
+
+ res = bpf_kptr_xchg(&mapval->local_root, res);
+ if (res) {
+ bpf_obj_drop(res);
+ return 4;
+ }
+ return 0;
+}
+
+SEC("tc")
+long unstash_rb_node(void *ctx)
+{
+ struct plain_local *inner_local_kptr = NULL;
+ struct map_value *mapval;
+ struct node_data *res;
+ long retval;
+ int key = 1;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &key);
+ if (!mapval)
+ return 1;
+
+ res = bpf_kptr_xchg(&mapval->node, NULL);
+ if (res) {
+ inner_local_kptr = bpf_kptr_xchg(&res->stashed_in_local_kptr, inner_local_kptr);
+ if (!inner_local_kptr) {
+ bpf_obj_drop(res);
+ return 1;
+ }
+ bpf_obj_drop(inner_local_kptr);
+
+ retval = res->key;
+ bpf_obj_drop(res);
+ return retval;
+ }
+ return 1;
+}
+
+SEC("tc")
+long stash_test_ref_kfunc(void *ctx)
+{
+ struct prog_test_ref_kfunc *res;
+ struct map_value *mapval;
+ int key = 0;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &key);
+ if (!mapval)
+ return 1;
+
+ res = bpf_kptr_xchg(&mapval->val, NULL);
+ if (res)
+ bpf_kfunc_call_test_release(res);
+ return 0;
+}
+
+SEC("tc")
+long refcount_acquire_without_unstash(void *ctx)
+{
+ struct refcounted_node *p;
+ struct stash *s;
+ int ret = 0;
+
+ s = bpf_map_lookup_elem(&refcounted_node_stash, &ret);
+ if (!s)
+ return 1;
+
+ if (!s->stashed)
+ /* refcount_acquire failure is expected when no refcounted_node
+ * has been stashed before this program executes
+ */
+ return 2;
+
+ p = bpf_refcount_acquire(s->stashed);
+ if (!p)
+ return 3;
+
+ ret = s->stashed ? s->stashed->data : -1;
+ bpf_obj_drop(p);
+ return ret;
+}
+
+/* Helper for refcount_acquire_without_unstash test */
+SEC("tc")
+long stash_refcounted_node(void *ctx)
+{
+ struct refcounted_node *p;
+ struct stash *s;
+ int key = 0;
+
+ s = bpf_map_lookup_elem(&refcounted_node_stash, &key);
+ if (!s)
+ return 1;
+
+ p = bpf_obj_new(typeof(*p));
+ if (!p)
+ return 2;
+ p->data = 42;
+
+ p = bpf_kptr_xchg(&s->stashed, p);
+ if (p) {
+ bpf_obj_drop(p);
+ return 3;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c b/tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c
new file mode 100644
index 000000000000..fcf7a7567da2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "../bpf_experimental.h"
+#include "bpf_misc.h"
+
+struct node_data {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+};
+
+struct map_value {
+ struct node_data __kptr *node;
+};
+
+struct node_data2 {
+ long key[4];
+};
+
+/* This is necessary so that LLVM generates BTF for the node_data struct.
+ * If it's not included, a fwd reference for node_data will be generated but
+ * no struct. Example BTF of "node" field in map_value when not included:
+ *
+ * [10] PTR '(anon)' type_id=35
+ * [34] FWD 'node_data' fwd_kind=struct
+ * [35] TYPE_TAG 'kptr_ref' type_id=34
+ */
+struct node_data *just_here_because_btf_bug;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 2);
+} some_nodes SEC(".maps");
+
+SEC("tc")
+__failure __msg("invalid kptr access, R2 type=ptr_node_data2 expected=ptr_node_data")
+long stash_rb_nodes(void *ctx)
+{
+ struct map_value *mapval;
+ struct node_data2 *res;
+ int idx = 0;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ res = bpf_obj_new(typeof(*res));
+ if (!res)
+ return 1;
+ res->key[0] = 40;
+
+ res = bpf_kptr_xchg(&mapval->node, res);
+ if (res)
+ bpf_obj_drop(res);
+ return 0;
+}
+
+SEC("tc")
+__failure __msg("R1 must have zero offset when passed to release func")
+long drop_rb_node_off(void *ctx)
+{
+ struct map_value *mapval;
+ struct node_data *res;
+ int idx = 0;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ res = bpf_obj_new(typeof(*res));
+ if (!res)
+ return 1;
+ /* Try releasing with graph node offset */
+ bpf_obj_drop(&res->node);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c
index 0758ba229ae0..637e75df2e14 100644
--- a/tools/testing/selftests/bpf/progs/local_storage.c
+++ b/tools/testing/selftests/bpf/progs/local_storage.c
@@ -4,9 +4,8 @@
* Copyright 2020 Google LLC.
*/
+#include "vmlinux.h"
#include <errno.h>
-#include <linux/bpf.h>
-#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -14,11 +13,13 @@ char _license[] SEC("license") = "GPL";
#define DUMMY_STORAGE_VALUE 0xdeadbeef
-int monitored_pid = 0;
+__u32 monitored_pid = 0;
int inode_storage_result = -1;
int sk_storage_result = -1;
+int task_storage_result = -1;
-struct dummy_storage {
+struct local_storage {
+ struct inode *exec_inode;
__u32 value;
};
@@ -26,90 +27,172 @@ struct {
__uint(type, BPF_MAP_TYPE_INODE_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
- __type(value, struct dummy_storage);
+ __type(value, struct local_storage);
} inode_storage_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
__type(key, int);
- __type(value, struct dummy_storage);
+ __type(value, struct local_storage);
} sk_storage_map SEC(".maps");
-/* TODO Use vmlinux.h once BTF pruning for embedded types is fixed.
- */
-struct sock {} __attribute__((preserve_access_index));
-struct sockaddr {} __attribute__((preserve_access_index));
-struct socket {
- struct sock *sk;
-} __attribute__((preserve_access_index));
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
+ __type(key, int);
+ __type(value, struct local_storage);
+} sk_storage_map2 SEC(".maps");
-struct inode {} __attribute__((preserve_access_index));
-struct dentry {
- struct inode *d_inode;
-} __attribute__((preserve_access_index));
-struct file {
- struct inode *f_inode;
-} __attribute__((preserve_access_index));
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct local_storage);
+} task_storage_map SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct local_storage);
+} task_storage_map2 SEC(".maps");
SEC("lsm/inode_unlink")
int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
- struct dummy_storage *storage;
+ struct bpf_local_storage *local_storage;
+ struct local_storage *storage;
+ struct task_struct *task;
+ bool is_self_unlink;
if (pid != monitored_pid)
return 0;
- storage = bpf_inode_storage_get(&inode_storage_map, victim->d_inode, 0,
- BPF_SK_STORAGE_GET_F_CREATE);
+ task = bpf_get_current_task_btf();
+ if (!task)
+ return 0;
+
+ task_storage_result = -1;
+
+ storage = bpf_task_storage_get(&task_storage_map, task, 0, 0);
+ if (!storage)
+ return 0;
+
+ /* Don't let an executable delete itself */
+ is_self_unlink = storage->exec_inode == victim->d_inode;
+
+ storage = bpf_task_storage_get(&task_storage_map2, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!storage || storage->value)
+ return 0;
+
+ if (bpf_task_storage_delete(&task_storage_map, task))
+ return 0;
+
+ /* Ensure that the task_storage_map is disconnected from the storage.
+ * The storage memory should not be freed back to the
+ * bpf_mem_alloc.
+ */
+ local_storage = task->bpf_storage;
+ if (!local_storage || local_storage->smap)
+ return 0;
+
+ task_storage_result = 0;
+
+ return is_self_unlink ? -EPERM : 0;
+}
+
+SEC("lsm.s/inode_rename")
+int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ struct local_storage *storage;
+ int err;
+
+ /* new_dentry->d_inode can be NULL when the inode is renamed to a file
+ * that did not exist before. The helper should be able to handle this
+ * NULL pointer.
+ */
+ bpf_inode_storage_get(&inode_storage_map, new_dentry->d_inode, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+
+ storage = bpf_inode_storage_get(&inode_storage_map, old_dentry->d_inode,
+ 0, 0);
if (!storage)
return 0;
- if (storage->value == DUMMY_STORAGE_VALUE)
+ if (storage->value != DUMMY_STORAGE_VALUE)
inode_storage_result = -1;
- inode_storage_result =
- bpf_inode_storage_delete(&inode_storage_map, victim->d_inode);
+ err = bpf_inode_storage_delete(&inode_storage_map, old_dentry->d_inode);
+ if (!err)
+ inode_storage_result = err;
return 0;
}
-SEC("lsm/socket_bind")
+SEC("lsm.s/socket_bind")
int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
int addrlen)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
- struct dummy_storage *storage;
+ struct local_storage *storage;
+ struct sock *sk = sock->sk;
- if (pid != monitored_pid)
+ if (pid != monitored_pid || !sk)
+ return 0;
+
+ storage = bpf_sk_storage_get(&sk_storage_map, sk, 0, 0);
+ if (!storage)
+ return 0;
+
+ sk_storage_result = -1;
+ if (storage->value != DUMMY_STORAGE_VALUE)
+ return 0;
+
+ /* This tests that we can associate multiple elements
+ * with the local storage.
+ */
+ storage = bpf_sk_storage_get(&sk_storage_map2, sk, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 0;
+
+ if (bpf_sk_storage_delete(&sk_storage_map2, sk))
return 0;
- storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0,
- BPF_SK_STORAGE_GET_F_CREATE);
+ storage = bpf_sk_storage_get(&sk_storage_map2, sk, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!storage)
return 0;
- if (storage->value == DUMMY_STORAGE_VALUE)
- sk_storage_result = -1;
+ if (bpf_sk_storage_delete(&sk_storage_map, sk))
+ return 0;
- sk_storage_result = bpf_sk_storage_delete(&sk_storage_map, sock->sk);
+ /* Ensure that the sk_storage_map is disconnected from the storage. */
+ if (!sk->sk_bpf_storage || sk->sk_bpf_storage->smap)
+ return 0;
+
+ sk_storage_result = 0;
return 0;
}
-SEC("lsm/socket_post_create")
+SEC("lsm.s/socket_post_create")
int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
int protocol, int kern)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
- struct dummy_storage *storage;
+ struct local_storage *storage;
+ struct sock *sk = sock->sk;
- if (pid != monitored_pid)
+ if (pid != monitored_pid || !sk)
return 0;
- storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0,
- BPF_SK_STORAGE_GET_F_CREATE);
+ storage = bpf_sk_storage_get(&sk_storage_map, sk, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!storage)
return 0;
@@ -118,23 +201,28 @@ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
return 0;
}
-SEC("lsm/file_open")
-int BPF_PROG(file_open, struct file *file)
+/* This uses the local storage to remember the inode of the binary that a
+ * process was originally executing.
+ */
+SEC("lsm.s/bprm_committed_creds")
+void BPF_PROG(exec, struct linux_binprm *bprm)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
- struct dummy_storage *storage;
+ struct local_storage *storage;
if (pid != monitored_pid)
- return 0;
+ return;
- if (!file->f_inode)
- return 0;
+ storage = bpf_task_storage_get(&task_storage_map,
+ bpf_get_current_task_btf(), 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (storage)
+ storage->exec_inode = bprm->file->f_inode;
- storage = bpf_inode_storage_get(&inode_storage_map, file->f_inode, 0,
- BPF_LOCAL_STORAGE_GET_F_CREATE);
+ storage = bpf_inode_storage_get(&inode_storage_map, bprm->file->f_inode,
+ 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!storage)
- return 0;
+ return;
storage->value = DUMMY_STORAGE_VALUE;
- return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/local_storage_bench.c b/tools/testing/selftests/bpf/progs/local_storage_bench.c
new file mode 100644
index 000000000000..2c3234c5b73a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/local_storage_bench.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define HASHMAP_SZ 4194304
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1000);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+ });
+} array_of_local_storage_maps SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1000);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, HASHMAP_SZ);
+ __type(key, int);
+ __type(value, int);
+ });
+} array_of_hash_maps SEC(".maps");
+
+long important_hits;
+long hits;
+
+/* set from user-space */
+const volatile unsigned int use_hashmap;
+const volatile unsigned int hashmap_num_keys;
+const volatile unsigned int num_maps;
+const volatile unsigned int interleave;
+
+struct loop_ctx {
+ struct task_struct *task;
+ long loop_hits;
+ long loop_important_hits;
+};
+
+static int do_lookup(unsigned int elem, struct loop_ctx *lctx)
+{
+ void *map, *inner_map;
+ int idx = 0;
+
+ if (use_hashmap)
+ map = &array_of_hash_maps;
+ else
+ map = &array_of_local_storage_maps;
+
+ inner_map = bpf_map_lookup_elem(map, &elem);
+ if (!inner_map)
+ return -1;
+
+ if (use_hashmap) {
+ idx = bpf_get_prandom_u32() % hashmap_num_keys;
+ bpf_map_lookup_elem(inner_map, &idx);
+ } else {
+ bpf_task_storage_get(inner_map, lctx->task, &idx,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ }
+
+ lctx->loop_hits++;
+ if (!elem)
+ lctx->loop_important_hits++;
+ return 0;
+}
+
+static long loop(u32 index, void *ctx)
+{
+ struct loop_ctx *lctx = (struct loop_ctx *)ctx;
+ unsigned int map_idx = index % num_maps;
+
+ do_lookup(map_idx, lctx);
+ if (interleave && map_idx % 3 == 0)
+ do_lookup(0, lctx);
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int get_local(void *ctx)
+{
+ struct loop_ctx lctx;
+
+ lctx.task = bpf_get_current_task_btf();
+ lctx.loop_hits = 0;
+ lctx.loop_important_hits = 0;
+ bpf_loop(10000, &loop, &lctx, 0);
+ __sync_add_and_fetch(&hits, lctx.loop_hits);
+ __sync_add_and_fetch(&important_hits, lctx.loop_important_hits);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c b/tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c
new file mode 100644
index 000000000000..03bf69f49075
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} task_storage SEC(".maps");
+
+long hits;
+long gp_hits;
+long gp_times;
+long current_gp_start;
+long unexpected;
+bool postgp_seen;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int get_local(void *ctx)
+{
+ struct task_struct *task;
+ int idx;
+ int *s;
+
+ idx = 0;
+ task = bpf_get_current_task_btf();
+ s = bpf_task_storage_get(&task_storage, task, &idx,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!s)
+ return 0;
+
+ *s = 3;
+ bpf_task_storage_delete(&task_storage, task);
+ __sync_add_and_fetch(&hits, 1);
+ return 0;
+}
+
+SEC("fentry/rcu_tasks_trace_pregp_step")
+int pregp_step(struct pt_regs *ctx)
+{
+ current_gp_start = bpf_ktime_get_ns();
+ return 0;
+}
+
+SEC("fentry/rcu_tasks_trace_postgp")
+int postgp(struct pt_regs *ctx)
+{
+ if (!current_gp_start && postgp_seen) {
+ /* Will only happen if prog tracing rcu_tasks_trace_pregp_step doesn't
+ * execute before this prog
+ */
+ __sync_add_and_fetch(&unexpected, 1);
+ return 0;
+ }
+
+ __sync_add_and_fetch(&gp_times, bpf_ktime_get_ns() - current_gp_start);
+ __sync_add_and_fetch(&gp_hits, 1);
+ current_gp_start = 0;
+ postgp_seen = true;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/loop3.c b/tools/testing/selftests/bpf/progs/loop3.c
index 76e93b31c14b..717dab14322b 100644
--- a/tools/testing/selftests/bpf/progs/loop3.c
+++ b/tools/testing/selftests/bpf/progs/loop3.c
@@ -12,9 +12,9 @@
char _license[] SEC("license") = "GPL";
SEC("raw_tracepoint/consume_skb")
-int while_true(volatile struct pt_regs* ctx)
+int while_true(struct pt_regs *ctx)
{
- __u64 i = 0, sum = 0;
+ volatile __u64 i = 0, sum = 0;
do {
i++;
sum += PT_REGS_RC(ctx);
diff --git a/tools/testing/selftests/bpf/progs/loop4.c b/tools/testing/selftests/bpf/progs/loop4.c
index b35337926d66..0de0357f57cc 100644
--- a/tools/testing/selftests/bpf/progs/loop4.c
+++ b/tools/testing/selftests/bpf/progs/loop4.c
@@ -3,6 +3,8 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
char _license[] SEC("license") = "GPL";
SEC("socket")
@@ -10,7 +12,7 @@ int combinations(volatile struct __sk_buff* skb)
{
int ret = 0, i;
-#pragma nounroll
+ __pragma_loop_no_unroll
for (i = 0; i < 20; i++)
if (skb->len)
ret |= 1 << i;
diff --git a/tools/testing/selftests/bpf/progs/loop5.c b/tools/testing/selftests/bpf/progs/loop5.c
index 913791923fa3..1b13f37f85ec 100644
--- a/tools/testing/selftests/bpf/progs/loop5.c
+++ b/tools/testing/selftests/bpf/progs/loop5.c
@@ -2,7 +2,6 @@
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#define barrier() __asm__ __volatile__("": : :"memory")
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/loop6.c b/tools/testing/selftests/bpf/progs/loop6.c
new file mode 100644
index 000000000000..e4ff97fbcce1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/loop6.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ptrace.h>
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* typically virtio scsi has max SGs of 6 */
+#define VIRTIO_MAX_SGS 6
+
+/* The verifier will fail with SG_MAX = 128. The failure can be
+ * worked around with a smaller SG_MAX, e.g. 10.
+ */
+#define WORKAROUND
+#ifdef WORKAROUND
+#define SG_MAX 10
+#else
+/* typically virtio blk has max SEG of 128 */
+#define SG_MAX 128
+#endif
+
+#define SG_CHAIN 0x01UL
+#define SG_END 0x02UL
+
+struct scatterlist {
+ unsigned long page_link;
+ unsigned int offset;
+ unsigned int length;
+};
+
+#define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN)
+#define sg_is_last(sg) ((sg)->page_link & SG_END)
+#define sg_chain_ptr(sg) \
+ ((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END)))
+
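+/* BPF-side equivalent of the kernel's sg_next(): read each scatterlist entry
+ * with bpf_probe_read_kernel() and follow chain pointers when present.
+ */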
+static inline struct scatterlist *__sg_next(struct scatterlist *sgp)
+{
+ struct scatterlist sg;
+
+ bpf_probe_read_kernel(&sg, sizeof(sg), sgp);
+ if (sg_is_last(&sg))
+ return NULL;
+
+ sgp++;
+
+ bpf_probe_read_kernel(&sg, sizeof(sg), sgp);
+ if (sg_is_chain(&sg))
+ sgp = sg_chain_ptr(&sg);
+
+ return sgp;
+}
+
+static inline struct scatterlist *get_sgp(struct scatterlist **sgs, int i)
+{
+ struct scatterlist *sgp;
+
+ bpf_probe_read_kernel(&sgp, sizeof(sgp), sgs + i);
+ return sgp;
+}
+
+int config = 0;
+int result = 0;
+
+SEC("kprobe/virtqueue_add_sgs")
+int BPF_KPROBE(trace_virtqueue_add_sgs, void *unused, struct scatterlist **sgs,
+ unsigned int out_sgs, unsigned int in_sgs)
+{
+ struct scatterlist *sgp = NULL;
+ __u64 length1 = 0, length2 = 0;
+ unsigned int i, n, len;
+
+ if (config != 0)
+ return 0;
+
+ for (i = 0; (i < VIRTIO_MAX_SGS) && (i < out_sgs); i++) {
+ __sink(out_sgs);
+ for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX);
+ sgp = __sg_next(sgp)) {
+ bpf_probe_read_kernel(&len, sizeof(len), &sgp->length);
+ length1 += len;
+ n++;
+ }
+ }
+
+ for (i = 0; (i < VIRTIO_MAX_SGS) && (i < in_sgs); i++) {
+ __sink(in_sgs);
+ for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX);
+ sgp = __sg_next(sgp)) {
+ bpf_probe_read_kernel(&len, sizeof(len), &sgp->length);
+ length2 += len;
+ n++;
+ }
+ }
+
+ config = 1;
+ result = length2 - length1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/lru_bug.c b/tools/testing/selftests/bpf/progs/lru_bug.c
new file mode 100644
index 000000000000..ad73029cb1e3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lru_bug.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct map_value {
+ struct task_struct __kptr_untrusted *ptr;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct map_value);
+} lru_map SEC(".maps");
+
+int pid = 0;
+int result = 1;
+
+SEC("fentry/bpf_ktime_get_ns")
+int printk(void *ctx)
+{
+ struct map_value v = {};
+
+ if (pid == bpf_get_current_task_btf()->pid)
+ bpf_map_update_elem(&lru_map, &(int){0}, &v, 0);
+ return 0;
+}
+
+SEC("fentry/do_nanosleep")
+int nanosleep(void *ctx)
+{
+ struct map_value val = {}, *v;
+ struct task_struct *current;
+
+ bpf_map_update_elem(&lru_map, &(int){0}, &val, 0);
+ v = bpf_map_lookup_elem(&lru_map, &(int){0});
+ if (!v)
+ return 0;
+ bpf_map_delete_elem(&lru_map, &(int){0});
+ current = bpf_get_current_task_btf();
+ v->ptr = current;
+ pid = current->pid;
+ bpf_ktime_get_ns();
+ result = !v->ptr;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/lsm.c b/tools/testing/selftests/bpf/progs/lsm.c
index ff4d343b94b5..0c13b7409947 100644
--- a/tools/testing/selftests/bpf/progs/lsm.c
+++ b/tools/testing/selftests/bpf/progs/lsm.c
@@ -5,9 +5,11 @@
*/
#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include <errno.h>
+#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
@@ -30,6 +32,53 @@ struct {
__type(value, __u64);
} lru_hash SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} percpu_array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} percpu_hash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} lru_percpu_hash SEC(".maps");
+
+struct inner_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, __u64);
+} inner_map SEC(".maps");
+
+struct outer_arr {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __array(values, struct inner_map);
+} outer_arr SEC(".maps") = {
+ .values = { [0] = &inner_map },
+};
+
+struct outer_hash {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(int));
+ __array(values, struct inner_map);
+} outer_hash SEC(".maps") = {
+ .values = { [0] = &inner_map },
+};
+
char _license[] SEC("license") = "GPL";
int monitored_pid = 0;
@@ -43,7 +92,7 @@ int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
if (ret != 0)
return ret;
- __u32 pid = bpf_get_current_pid_tgid() >> 32;
+ __s32 pid = bpf_get_current_pid_tgid() >> 32;
int is_stack = 0;
is_stack = (vma->vm_start <= vma->vm_mm->start_stack &&
@@ -61,6 +110,7 @@ SEC("lsm.s/bprm_committed_creds")
int BPF_PROG(test_void_hook, struct linux_binprm *bprm)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
+ struct inner_map *inner_map;
char args[64];
__u32 key = 0;
__u64 *value;
@@ -80,6 +130,27 @@ int BPF_PROG(test_void_hook, struct linux_binprm *bprm)
value = bpf_map_lookup_elem(&lru_hash, &key);
if (value)
*value = 0;
+ value = bpf_map_lookup_elem(&percpu_array, &key);
+ if (value)
+ *value = 0;
+ value = bpf_map_lookup_elem(&percpu_hash, &key);
+ if (value)
+ *value = 0;
+ value = bpf_map_lookup_elem(&lru_percpu_hash, &key);
+ if (value)
+ *value = 0;
+ inner_map = bpf_map_lookup_elem(&outer_arr, &key);
+ if (inner_map) {
+ value = bpf_map_lookup_elem(inner_map, &key);
+ if (value)
+ *value = 0;
+ }
+ inner_map = bpf_map_lookup_elem(&outer_hash, &key);
+ if (inner_map) {
+ value = bpf_map_lookup_elem(inner_map, &key);
+ if (value)
+ *value = 0;
+ }
return 0;
}
@@ -91,11 +162,11 @@ int BPF_PROG(test_task_free, struct task_struct *task)
int copy_test = 0;
-SEC("fentry.s/__x64_sys_setdomainname")
+SEC("fentry.s/" SYS_PREFIX "sys_setdomainname")
int BPF_PROG(test_sys_setdomainname, struct pt_regs *regs)
{
- void *ptr = (void *)PT_REGS_PARM1(regs);
- int len = PT_REGS_PARM2(regs);
+ void *ptr = (void *)PT_REGS_PARM1_SYSCALL(regs);
+ int len = PT_REGS_PARM2_SYSCALL(regs);
int buf = 0;
long ret;
diff --git a/tools/testing/selftests/bpf/progs/lsm_cgroup.c b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
new file mode 100644
index 000000000000..d7598538aa2d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+extern bool CONFIG_SECURITY_SELINUX __kconfig __weak;
+extern bool CONFIG_SECURITY_SMACK __kconfig __weak;
+extern bool CONFIG_SECURITY_APPARMOR __kconfig __weak;
+
+#ifndef AF_PACKET
+#define AF_PACKET 17
+#endif
+
+#ifndef AF_UNIX
+#define AF_UNIX 1
+#endif
+
+#ifndef EPERM
+#define EPERM 1
+#endif
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
+ __type(key, __u64);
+ __type(value, __u64);
+} cgroup_storage SEC(".maps");
+
+int called_socket_post_create;
+int called_socket_post_create2;
+int called_socket_bind;
+int called_socket_bind2;
+int called_socket_alloc;
+int called_socket_clone;
+
+static __always_inline int test_local_storage(void)
+{
+ __u64 *val;
+
+ val = bpf_get_local_storage(&cgroup_storage, 0);
+ if (!val)
+ return 0;
+ *val += 1;
+
+ return 1;
+}
+
+static __always_inline int real_create(struct socket *sock, int family,
+ int protocol)
+{
+ struct sock *sk;
+ int prio = 123;
+
+ /* Reject non-tx-only AF_PACKET. */
+ if (family == AF_PACKET && protocol != 0)
+ return 0; /* EPERM */
+
+ sk = sock->sk;
+ if (!sk)
+ return 1;
+
+ /* The rest of the sockets get default policy. */
+ if (bpf_setsockopt(sk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
+ return 0; /* EPERM */
+
+ /* Make sure bpf_getsockopt is allowed and works. */
+ prio = 0;
+ if (bpf_getsockopt(sk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
+ return 0; /* EPERM */
+ if (prio != 123)
+ return 0; /* EPERM */
+
+ /* Can access cgroup local storage. */
+ if (!test_local_storage())
+ return 0; /* EPERM */
+
+ return 1;
+}
+
+/* __cgroup_bpf_run_lsm_socket */
+SEC("lsm_cgroup/socket_post_create")
+int BPF_PROG(socket_post_create, struct socket *sock, int family,
+ int type, int protocol, int kern)
+{
+ called_socket_post_create++;
+ return real_create(sock, family, protocol);
+}
+
+/* __cgroup_bpf_run_lsm_socket */
+SEC("lsm_cgroup/socket_post_create")
+int BPF_PROG(socket_post_create2, struct socket *sock, int family,
+ int type, int protocol, int kern)
+{
+ called_socket_post_create2++;
+ return real_create(sock, family, protocol);
+}
+
+static __always_inline int real_bind(struct socket *sock,
+ struct sockaddr *address,
+ int addrlen)
+{
+ struct sockaddr_ll sa = {};
+ struct sock *sk = sock->sk;
+
+ if (!sk)
+ return 1;
+
+ if (sk->__sk_common.skc_family != AF_PACKET)
+ return 1;
+
+ if (sk->sk_kern_sock)
+ return 1;
+
+ bpf_probe_read_kernel(&sa, sizeof(sa), address);
+ if (sa.sll_protocol)
+ return 0; /* EPERM */
+
+ /* Can access cgroup local storage. */
+ if (!test_local_storage())
+ return 0; /* EPERM */
+
+ return 1;
+}
+
+/* __cgroup_bpf_run_lsm_socket */
+SEC("lsm_cgroup/socket_bind")
+int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
+ int addrlen)
+{
+ called_socket_bind++;
+ return real_bind(sock, address, addrlen);
+}
+
+/* __cgroup_bpf_run_lsm_socket */
+SEC("lsm_cgroup/socket_bind")
+int BPF_PROG(socket_bind2, struct socket *sock, struct sockaddr *address,
+ int addrlen)
+{
+ called_socket_bind2++;
+ return real_bind(sock, address, addrlen);
+}
+
+/* __cgroup_bpf_run_lsm_current (via bpf_lsm_current_hooks) */
+SEC("lsm_cgroup/sk_alloc_security")
+int BPF_PROG(socket_alloc, struct sock *sk, int family, gfp_t priority)
+{
+ called_socket_alloc++;
+ /* if non-bpf LSMs are already installed, returning EPERM here would leak memory they allocated */
+ if (CONFIG_SECURITY_SELINUX || CONFIG_SECURITY_SMACK || CONFIG_SECURITY_APPARMOR)
+ return 1;
+
+ if (family == AF_UNIX)
+ return 0; /* EPERM */
+
+ /* Can access cgroup local storage. */
+ if (!test_local_storage())
+ return 0; /* EPERM */
+
+ return 1;
+}
+
+/* __cgroup_bpf_run_lsm_sock */
+SEC("lsm_cgroup/inet_csk_clone")
+int BPF_PROG(socket_clone, struct sock *newsk, const struct request_sock *req)
+{
+ int prio = 234;
+
+ if (!newsk)
+ return 1;
+
+ /* Accepted request sockets get a different priority. */
+ if (bpf_setsockopt(newsk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
+ return 1;
+
+ /* Make sure bpf_getsockopt is allowed and works. */
+ prio = 0;
+ if (bpf_getsockopt(newsk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
+ return 1;
+ if (prio != 234)
+ return 1;
+
+ /* Can access cgroup local storage. */
+ if (!test_local_storage())
+ return 1;
+
+ called_socket_clone++;
+
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/lsm_cgroup_nonvoid.c b/tools/testing/selftests/bpf/progs/lsm_cgroup_nonvoid.c
new file mode 100644
index 000000000000..6cb0f161f417
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lsm_cgroup_nonvoid.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("lsm_cgroup/inet_csk_clone")
+int BPF_PROG(nonvoid_socket_clone, struct sock *newsk, const struct request_sock *req)
+{
+ /* Can not return any errors from void LSM hooks. */
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/lsm_tailcall.c b/tools/testing/selftests/bpf/progs/lsm_tailcall.c
new file mode 100644
index 000000000000..49c075ce2d4c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lsm_tailcall.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Huawei Technologies Co., Ltd */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+SEC("lsm/file_permission")
+int lsm_file_permission_prog(void *ctx)
+{
+ return 0;
+}
+
+SEC("lsm/file_alloc_security")
+int lsm_file_alloc_security_prog(void *ctx)
+{
+ return 0;
+}
+
+SEC("lsm/file_alloc_security")
+int lsm_file_alloc_security_entry(void *ctx)
+{
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/map_in_map_btf.c b/tools/testing/selftests/bpf/progs/map_in_map_btf.c
new file mode 100644
index 000000000000..7a1336d7b16a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_in_map_btf.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct node_data {
+ __u64 data;
+ struct bpf_list_node node;
+};
+
+struct map_value {
+ struct bpf_list_head head __contains(node_data, node);
+ struct bpf_spin_lock lock;
+};
+
+struct inner_array_type {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} inner_array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(key_size, 4);
+ __uint(value_size, 4);
+ __uint(max_entries, 1);
+ __array(values, struct inner_array_type);
+} outer_array SEC(".maps") = {
+ .values = {
+ [0] = &inner_array,
+ },
+};
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+bool done = false;
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int add_to_list_in_inner_array(void *ctx)
+{
+ struct map_value *value;
+ struct node_data *new;
+ struct bpf_map *map;
+ int zero = 0;
+
+ if (done || (u32)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+ map = bpf_map_lookup_elem(&outer_array, &zero);
+ if (!map)
+ return 0;
+
+ value = bpf_map_lookup_elem(map, &zero);
+ if (!value)
+ return 0;
+
+ new = bpf_obj_new(typeof(*new));
+ if (!new)
+ return 0;
+
+ bpf_spin_lock(&value->lock);
+ bpf_list_push_back(&value->head, &new->node);
+ bpf_spin_unlock(&value->lock);
+ done = true;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c
new file mode 100644
index 000000000000..edaba481db9d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_kptr.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+struct map_value {
+ struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr;
+ struct prog_test_ref_kfunc __kptr *ref_ptr;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} array_map SEC(".maps");
+
+struct pcpu_array_map {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} pcpu_array_map SEC(".maps");
+
+struct hash_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} hash_map SEC(".maps");
+
+struct pcpu_hash_map {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} pcpu_hash_map SEC(".maps");
+
+struct hash_malloc_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} hash_malloc_map SEC(".maps");
+
+struct pcpu_hash_malloc_map {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} pcpu_hash_malloc_map SEC(".maps");
+
+struct lru_hash_map {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} lru_hash_map SEC(".maps");
+
+struct lru_pcpu_hash_map {
+ __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} lru_pcpu_hash_map SEC(".maps");
+
+struct cgrp_ls_map {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct map_value);
+} cgrp_ls_map SEC(".maps");
+
+struct task_ls_map {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct map_value);
+} task_ls_map SEC(".maps");
+
+struct inode_ls_map {
+ __uint(type, BPF_MAP_TYPE_INODE_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct map_value);
+} inode_ls_map SEC(".maps");
+
+struct sk_ls_map {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct map_value);
+} sk_ls_map SEC(".maps");
+
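+/* Define an outer map-in-map whose single slot is pre-populated with the
+ * given inner map, so the kptr fields can also be exercised through
+ * map-in-map lookups (see test_map_in_map_kptr below).
+ */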
+#define DEFINE_MAP_OF_MAP(map_type, inner_map_type, name) \
+ struct { \
+ __uint(type, map_type); \
+ __uint(max_entries, 1); \
+ __uint(key_size, sizeof(int)); \
+ __uint(value_size, sizeof(int)); \
+ __array(values, struct inner_map_type); \
+ } name SEC(".maps") = { \
+ .values = { [0] = &inner_map_type }, \
+ }
+
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_map, array_of_array_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_map, array_of_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_malloc_map, array_of_hash_malloc_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, lru_hash_map, array_of_lru_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, pcpu_array_map, array_of_pcpu_array_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, pcpu_hash_map, array_of_pcpu_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, array_map, hash_of_array_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_map, hash_of_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_malloc_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, pcpu_array_map, hash_of_pcpu_array_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, pcpu_hash_map, hash_of_pcpu_hash_maps);
+
+#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
+
+static void test_kptr_unref(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p;
+
+ p = v->unref_ptr;
+ /* store untrusted_ptr_or_null_ */
+ WRITE_ONCE(v->unref_ptr, p);
+ if (!p)
+ return;
+ if (p->a + p->b > 100)
+ return;
+ /* store untrusted_ptr_ */
+ WRITE_ONCE(v->unref_ptr, p);
+ /* store NULL */
+ WRITE_ONCE(v->unref_ptr, NULL);
+}
+
+static void test_kptr_ref(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p;
+
+ p = v->ref_ptr;
+ /* store ptr_or_null_ */
+ WRITE_ONCE(v->unref_ptr, p);
+ if (!p)
+ return;
+ /*
+ * p is rcu_ptr_prog_test_ref_kfunc,
+ * because bpf prog is non-sleepable and runs in RCU CS.
+ * p can be passed to kfunc that requires KF_RCU.
+ */
+ bpf_kfunc_call_test_ref(p);
+ if (p->a + p->b > 100)
+ return;
+ /* store NULL */
+ p = bpf_kptr_xchg(&v->ref_ptr, NULL);
+ if (!p)
+ return;
+ /*
+ * p is trusted_ptr_prog_test_ref_kfunc.
+ * p can be passed to kfunc that requires KF_RCU.
+ */
+ bpf_kfunc_call_test_ref(p);
+ if (p->a + p->b > 100) {
+ bpf_kfunc_call_test_release(p);
+ return;
+ }
+ /* store ptr_ */
+ WRITE_ONCE(v->unref_ptr, p);
+ bpf_kfunc_call_test_release(p);
+
+ p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+ if (!p)
+ return;
+ /* store ptr_ */
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (!p)
+ return;
+ if (p->a + p->b > 100) {
+ bpf_kfunc_call_test_release(p);
+ return;
+ }
+ bpf_kfunc_call_test_release(p);
+}
+
+static void test_kptr(struct map_value *v)
+{
+ test_kptr_unref(v);
+ test_kptr_ref(v);
+}
+
+SEC("tc")
+int test_map_kptr(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+#define TEST(map) \
+ v = bpf_map_lookup_elem(&map, &key); \
+ if (!v) \
+ return 0; \
+ test_kptr(v)
+
+ TEST(array_map);
+ TEST(hash_map);
+ TEST(hash_malloc_map);
+ TEST(lru_hash_map);
+ TEST(pcpu_array_map);
+ TEST(pcpu_hash_map);
+
+#undef TEST
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_map_kptr, struct cgroup *cgrp, const char *path)
+{
+ struct map_value *v;
+
+ v = bpf_cgrp_storage_get(&cgrp_ls_map, cgrp, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (v)
+ test_kptr(v);
+ return 0;
+}
+
+SEC("lsm/inode_unlink")
+int BPF_PROG(test_task_map_kptr, struct inode *inode, struct dentry *victim)
+{
+ struct task_struct *task;
+ struct map_value *v;
+
+ task = bpf_get_current_task_btf();
+ if (!task)
+ return 0;
+ v = bpf_task_storage_get(&task_ls_map, task, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (v)
+ test_kptr(v);
+ return 0;
+}
+
+SEC("lsm/inode_unlink")
+int BPF_PROG(test_inode_map_kptr, struct inode *inode, struct dentry *victim)
+{
+ struct map_value *v;
+
+ v = bpf_inode_storage_get(&inode_ls_map, inode, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (v)
+ test_kptr(v);
+ return 0;
+}
+
+SEC("tc")
+int test_sk_map_kptr(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ struct bpf_sock *sk;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 0;
+ v = bpf_sk_storage_get(&sk_ls_map, sk, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (v)
+ test_kptr(v);
+ return 0;
+}
+
+SEC("tc")
+int test_map_in_map_kptr(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+ void *map;
+
+#define TEST(map_in_map) \
+ map = bpf_map_lookup_elem(&map_in_map, &key); \
+ if (!map) \
+ return 0; \
+ v = bpf_map_lookup_elem(map, &key); \
+ if (!v) \
+ return 0; \
+ test_kptr(v)
+
+ TEST(array_of_array_maps);
+ TEST(array_of_hash_maps);
+ TEST(array_of_hash_malloc_maps);
+ TEST(array_of_lru_hash_maps);
+ TEST(array_of_pcpu_array_maps);
+ TEST(array_of_pcpu_hash_maps);
+ TEST(hash_of_array_maps);
+ TEST(hash_of_hash_maps);
+ TEST(hash_of_hash_malloc_maps);
+ TEST(hash_of_lru_hash_maps);
+ TEST(hash_of_pcpu_array_maps);
+ TEST(hash_of_pcpu_hash_maps);
+
+#undef TEST
+ return 0;
+}
+
+int ref = 1;
+
+static __always_inline
+int test_map_kptr_ref_pre(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p, *p_st;
+ unsigned long arg = 0;
+ int ret;
+
+ p = bpf_kfunc_call_test_acquire(&arg);
+ if (!p)
+ return 1;
+ ref++;
+
+ p_st = p->next;
+ if (p_st->cnt.refs.counter != ref) {
+ ret = 2;
+ goto end;
+ }
+
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (p) {
+ ret = 3;
+ goto end;
+ }
+ if (p_st->cnt.refs.counter != ref)
+ return 4;
+
+ p = bpf_kptr_xchg(&v->ref_ptr, NULL);
+ if (!p)
+ return 5;
+ bpf_kfunc_call_test_release(p);
+ ref--;
+ if (p_st->cnt.refs.counter != ref)
+ return 6;
+
+ p = bpf_kfunc_call_test_acquire(&arg);
+ if (!p)
+ return 7;
+ ref++;
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (p) {
+ ret = 8;
+ goto end;
+ }
+ if (p_st->cnt.refs.counter != ref)
+ return 9;
+ /* Leave in map */
+
+ return 0;
+end:
+ ref--;
+ bpf_kfunc_call_test_release(p);
+ return ret;
+}
+
+static __always_inline
+int test_map_kptr_ref_post(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p, *p_st;
+
+ p_st = v->ref_ptr;
+ if (!p_st || p_st->cnt.refs.counter != ref)
+ return 1;
+
+ p = bpf_kptr_xchg(&v->ref_ptr, NULL);
+ if (!p)
+ return 2;
+ if (p_st->cnt.refs.counter != ref) {
+ bpf_kfunc_call_test_release(p);
+ return 3;
+ }
+
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (p) {
+ bpf_kfunc_call_test_release(p);
+ return 4;
+ }
+ if (p_st->cnt.refs.counter != ref)
+ return 5;
+
+ return 0;
+}
+
+#define TEST(map) \
+ v = bpf_map_lookup_elem(&map, &key); \
+ if (!v) \
+ return -1; \
+ ret = test_map_kptr_ref_pre(v); \
+ if (ret) \
+ return ret;
+
+#define TEST_PCPU(map) \
+ v = bpf_map_lookup_percpu_elem(&map, &key, 0); \
+ if (!v) \
+ return -1; \
+ ret = test_map_kptr_ref_pre(v); \
+ if (ret) \
+ return ret;
+
+SEC("tc")
+int test_map_kptr_ref1(struct __sk_buff *ctx)
+{
+ struct map_value *v, val = {};
+ int key = 0, ret;
+
+ bpf_map_update_elem(&hash_map, &key, &val, 0);
+ bpf_map_update_elem(&hash_malloc_map, &key, &val, 0);
+ bpf_map_update_elem(&lru_hash_map, &key, &val, 0);
+
+ bpf_map_update_elem(&pcpu_hash_map, &key, &val, 0);
+ bpf_map_update_elem(&pcpu_hash_malloc_map, &key, &val, 0);
+ bpf_map_update_elem(&lru_pcpu_hash_map, &key, &val, 0);
+
+ TEST(array_map);
+ TEST(hash_map);
+ TEST(hash_malloc_map);
+ TEST(lru_hash_map);
+
+ TEST_PCPU(pcpu_array_map);
+ TEST_PCPU(pcpu_hash_map);
+ TEST_PCPU(pcpu_hash_malloc_map);
+ TEST_PCPU(lru_pcpu_hash_map);
+
+ return 0;
+}
+
+#undef TEST
+#undef TEST_PCPU
+
+#define TEST(map) \
+ v = bpf_map_lookup_elem(&map, &key); \
+ if (!v) \
+ return -1; \
+ ret = test_map_kptr_ref_post(v); \
+ if (ret) \
+ return ret;
+
+#define TEST_PCPU(map) \
+ v = bpf_map_lookup_percpu_elem(&map, &key, 0); \
+ if (!v) \
+ return -1; \
+ ret = test_map_kptr_ref_post(v); \
+ if (ret) \
+ return ret;
+
+SEC("tc")
+int test_map_kptr_ref2(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0, ret;
+
+ TEST(array_map);
+ TEST(hash_map);
+ TEST(hash_malloc_map);
+ TEST(lru_hash_map);
+
+ TEST_PCPU(pcpu_array_map);
+ TEST_PCPU(pcpu_hash_map);
+ TEST_PCPU(pcpu_hash_malloc_map);
+ TEST_PCPU(lru_pcpu_hash_map);
+
+ return 0;
+}
+
+#undef TEST
+#undef TEST_PCPU
+
+SEC("tc")
+int test_map_kptr_ref3(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ unsigned long sp = 0;
+
+ p = bpf_kfunc_call_test_acquire(&sp);
+ if (!p)
+ return 1;
+ ref++;
+ if (p->cnt.refs.counter != ref) {
+ bpf_kfunc_call_test_release(p);
+ return 2;
+ }
+ bpf_kfunc_call_test_release(p);
+ ref--;
+ return 0;
+}
+
+SEC("syscall")
+int test_ls_map_kptr_ref1(void *ctx)
+{
+ struct task_struct *current;
+ struct map_value *v;
+
+ current = bpf_get_current_task_btf();
+ if (!current)
+ return 100;
+ v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
+ if (v)
+ return 150;
+ v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!v)
+ return 200;
+ return test_map_kptr_ref_pre(v);
+}
+
+SEC("syscall")
+int test_ls_map_kptr_ref2(void *ctx)
+{
+ struct task_struct *current;
+ struct map_value *v;
+
+ current = bpf_get_current_task_btf();
+ if (!current)
+ return 100;
+ v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
+ if (!v)
+ return 200;
+ return test_map_kptr_ref_post(v);
+}
+
+SEC("syscall")
+int test_ls_map_kptr_ref_del(void *ctx)
+{
+ struct task_struct *current;
+ struct map_value *v;
+
+ current = bpf_get_current_task_btf();
+ if (!current)
+ return 100;
+ v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
+ if (!v)
+ return 200;
+ if (!v->ref_ptr)
+ return 300;
+ return bpf_task_storage_delete(&task_ls_map, current);
+}
+
+char _license[] SEC("license") = "GPL";
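The referenced-kptr tests above all exercise one ownership protocol: a pointer returned by an acquire kfunc is owned by the program until it is either released or handed to the map with bpf_kptr_xchg(), which in turn hands back the previous occupant (possibly NULL) that the program must release. A minimal sketch of that pattern, reusing the map_value layout and test kfuncs declared earlier in this file (illustrative only, not an additional selftest):

static int kptr_ownership_sketch(struct map_value *v)
{
	struct prog_test_ref_kfunc *p, *old;
	unsigned long arg = 0;

	p = bpf_kfunc_call_test_acquire(&arg);		/* program owns p */
	if (!p)
		return 0;

	old = bpf_kptr_xchg(&v->ref_ptr, p);		/* map now owns p */
	if (old)
		bpf_kfunc_call_test_release(old);	/* program owns old */

	old = bpf_kptr_xchg(&v->ref_ptr, NULL);		/* take ownership back */
	if (old)
		bpf_kfunc_call_test_release(old);
	return 0;
}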
diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
new file mode 100644
index 000000000000..4c0ff01f1a96
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+struct map_value {
+ char buf[8];
+ struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr;
+ struct prog_test_ref_kfunc __kptr *ref_ptr;
+ struct prog_test_member __kptr *ref_memb_ptr;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} array_map SEC(".maps");
+
+SEC("?tc")
+__failure __msg("kptr access size must be BPF_DW")
+int size_not_bpf_dw(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ *(u32 *)&v->unref_ptr = 0;
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("kptr access cannot have variable offset")
+int non_const_var_off(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0, id;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ id = ctx->protocol;
+ if (id < 4 || id > 12)
+ return 0;
+ *(u64 *)((void *)v + id) = 0;
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("R1 doesn't have constant offset. kptr has to be")
+int non_const_var_off_kptr_xchg(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0, id;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ id = ctx->protocol;
+ if (id < 4 || id > 12)
+ return 0;
+ bpf_kptr_xchg((void *)v + id, NULL);
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("kptr access misaligned expected=8 off=7")
+int misaligned_access_write(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ *(void **)((void *)v + 7) = NULL;
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("kptr access misaligned expected=8 off=1")
+int misaligned_access_read(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ return *(u64 *)((void *)v + 1);
+}
+
+SEC("?tc")
+__failure __msg("variable untrusted_ptr_ access var_off=(0x0; 0x1e0)")
+int reject_var_off_store(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *unref_ptr;
+ struct map_value *v;
+ int key = 0, id;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ unref_ptr = v->unref_ptr;
+ if (!unref_ptr)
+ return 0;
+ id = ctx->protocol;
+ if (id < 4 || id > 12)
+ return 0;
+ unref_ptr += id;
+ v->unref_ptr = unref_ptr;
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc")
+int reject_bad_type_match(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *unref_ptr;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ unref_ptr = v->unref_ptr;
+ if (!unref_ptr)
+ return 0;
+ unref_ptr = (void *)unref_ptr + 4;
+ v->unref_ptr = unref_ptr;
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_")
+int marked_as_untrusted_or_null(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_this_cpu_ptr(v->unref_ptr);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("access beyond struct prog_test_ref_kfunc at off 32 size 4")
+int correct_btf_id_check_size(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ p = v->unref_ptr;
+ if (!p)
+ return 0;
+ return *(int *)((void *)p + bpf_core_type_size(struct prog_test_ref_kfunc));
+}
+
+SEC("?tc")
+__failure __msg("R1 type=untrusted_ptr_ expected=percpu_ptr_")
+int inherit_untrusted_on_walk(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *unref_ptr;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ unref_ptr = v->unref_ptr;
+ if (!unref_ptr)
+ return 0;
+ unref_ptr = unref_ptr->next;
+ bpf_this_cpu_ptr(unref_ptr);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("off=8 kptr isn't referenced kptr")
+int reject_kptr_xchg_on_unref(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_kptr_xchg(&v->unref_ptr, NULL);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("R1 type=rcu_ptr_or_null_ expected=percpu_ptr_")
+int mark_ref_as_untrusted_or_null(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_this_cpu_ptr(v->ref_ptr);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("store to referenced kptr disallowed")
+int reject_untrusted_store_to_ref(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ p = v->ref_ptr;
+ if (!p)
+ return 0;
+ /* Checkmate, clang */
+ *(struct prog_test_ref_kfunc * volatile *)&v->ref_ptr = p;
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("R2 must be referenced")
+int reject_untrusted_xchg(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ p = v->ref_ptr;
+ if (!p)
+ return 0;
+ bpf_kptr_xchg(&v->ref_ptr, p);
+ return 0;
+}
+
+SEC("?tc")
+__failure
+__msg("invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member")
+int reject_bad_type_xchg(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *ref_ptr;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ ref_ptr = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+ if (!ref_ptr)
+ return 0;
+ bpf_kptr_xchg(&v->ref_memb_ptr, ref_ptr);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("invalid kptr access, R2 type=ptr_prog_test_ref_kfunc")
+int reject_member_of_ref_xchg(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *ref_ptr;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ ref_ptr = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+ if (!ref_ptr)
+ return 0;
+ bpf_kptr_xchg(&v->ref_memb_ptr, &ref_ptr->memb);
+ return 0;
+}
+
+SEC("?syscall")
+__failure __msg("kptr cannot be accessed indirectly by helper")
+int reject_indirect_helper_access(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_get_current_comm(v, sizeof(v->buf) + 1);
+ return 0;
+}
+
+__noinline
+int write_func(int *p)
+{
+ return p ? *p = 42 : 0;
+}
+
+SEC("?tc")
+__failure __msg("kptr cannot be accessed indirectly by helper")
+int reject_indirect_global_func_access(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ return write_func((void *)v + 5);
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference id=4 alloc_insn=")
+int kptr_xchg_ref_state(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+ if (!p)
+ return 0;
+ bpf_kptr_xchg(&v->ref_ptr, p);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Possibly NULL pointer passed to helper arg2")
+int kptr_xchg_possibly_null(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+
+ /* PTR_TO_BTF_ID | PTR_MAYBE_NULL passed to bpf_kptr_xchg() */
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (p)
+ bpf_kfunc_call_test_release(p);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/map_percpu_stats.c b/tools/testing/selftests/bpf/progs/map_percpu_stats.c
new file mode 100644
index 000000000000..63245785eb69
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_percpu_stats.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+__u32 target_id;
+
+__s64 bpf_map_sum_elem_count(const struct bpf_map *map) __ksym;
+
+SEC("iter/bpf_map")
+int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct bpf_map *map = ctx->map;
+
+ if (map && map->id == target_id)
+ BPF_SEQ_PRINTF(seq, "%lld", bpf_map_sum_elem_count(map));
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
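A plausible userspace counterpart for the iterator above (not part of this patch) would set target_id, attach the iterator, and read the formatted element count from the iterator fd. A rough libbpf sketch, assuming a bpftool-generated skeleton named map_percpu_stats.skel.h:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "map_percpu_stats.skel.h"

static long read_elem_count(__u32 map_id)
{
	struct map_percpu_stats *skel;
	struct bpf_link *link = NULL;
	char buf[64] = {};
	long count = -1;
	int iter_fd;

	skel = map_percpu_stats__open_and_load();
	if (!skel)
		return -1;
	skel->bss->target_id = map_id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (!link)
		goto out;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0)
		goto out;

	/* the program prints the sum only for the matching map id */
	if (read(iter_fd, buf, sizeof(buf) - 1) > 0)
		count = atol(buf);
	close(iter_fd);
out:
	bpf_link__destroy(link);
	map_percpu_stats__destroy(skel);
	return count;
}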
diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
index c325405751e2..efaf622c28dd 100644
--- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c
+++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
@@ -12,6 +12,7 @@ _Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");
enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;
+int page_size = 0; /* userspace should set it */
#define VERIFY_TYPE(type, func) ({ \
g_map_type = type; \
@@ -26,17 +27,12 @@ __u32 g_line = 0;
return 0; \
})
-struct bpf_map_memory {
- __u32 pages;
-} __attribute__((preserve_access_index));
-
struct bpf_map {
enum bpf_map_type map_type;
__u32 key_size;
__u32 value_size;
__u32 max_entries;
__u32 id;
- struct bpf_map_memory memory;
} __attribute__((preserve_access_index));
static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
@@ -47,7 +43,6 @@ static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
VERIFY(map->value_size == value_size);
VERIFY(map->max_entries == max_entries);
VERIFY(map->id > 0);
- VERIFY(map->memory.pages > 0);
return 1;
}
@@ -60,7 +55,6 @@ static inline int check_bpf_map_ptr(struct bpf_map *indirect,
VERIFY(indirect->value_size == direct->value_size);
VERIFY(indirect->max_entries == direct->max_entries);
VERIFY(indirect->id == direct->id);
- VERIFY(indirect->memory.pages == direct->memory.pages);
return 1;
}
@@ -109,6 +103,8 @@ struct {
__type(value, __u32);
} m_hash SEC(".maps");
+__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym;
+
static inline int check_hash(void)
{
struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
@@ -121,6 +117,8 @@ static inline int check_hash(void)
VERIFY(hash->elem_size == 64);
VERIFY(hash->count.counter == 0);
+ VERIFY(bpf_map_sum_elem_count(map) == 0);
+
for (i = 0; i < HALF_ENTRIES; ++i) {
const __u32 key = i;
const __u32 val = 1;
@@ -129,6 +127,7 @@ static inline int check_hash(void)
return 0;
}
VERIFY(hash->count.counter == HALF_ENTRIES);
+ VERIFY(bpf_map_sum_elem_count(map) == HALF_ENTRIES);
return 1;
}
@@ -317,7 +316,7 @@ struct lpm_trie {
} __attribute__((preserve_access_index));
struct lpm_key {
- struct bpf_lpm_trie_key trie_key;
+ struct bpf_lpm_trie_key_hdr trie_key;
__u32 data;
};
@@ -340,9 +339,11 @@ static inline int check_lpm_trie(void)
return 1;
}
+#define INNER_MAX_ENTRIES 1234
+
struct inner_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 1);
+ __uint(max_entries, INNER_MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} inner_map SEC(".maps");
@@ -354,7 +355,7 @@ struct {
__type(value, __u32);
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 1);
+ __uint(max_entries, INNER_MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
});
@@ -366,8 +367,13 @@ static inline int check_array_of_maps(void)
{
struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;
+ struct bpf_array *inner_map;
+ int key = 0;
VERIFY(check_default(&array_of_maps->map, map));
+ inner_map = bpf_map_lookup_elem(array_of_maps, &key);
+ VERIFY(inner_map != NULL);
+ VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);
return 1;
}
@@ -388,8 +394,13 @@ static inline int check_hash_of_maps(void)
{
struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;
+ struct bpf_htab *inner_map;
+ int key = 2;
VERIFY(check_default(&hash_of_maps->map, map));
+ inner_map = bpf_map_lookup_elem(hash_of_maps, &key);
+ VERIFY(inner_map != NULL);
+ VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);
return 1;
}
@@ -642,7 +653,6 @@ struct bpf_ringbuf_map {
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
- __uint(max_entries, 1 << 12);
} m_ringbuf SEC(".maps");
static inline int check_ringbuf(void)
@@ -650,7 +660,7 @@ static inline int check_ringbuf(void)
struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
struct bpf_map *map = (struct bpf_map *)&m_ringbuf;
- VERIFY(check(&ringbuf->map, map, 0, 0, 1 << 12));
+ VERIFY(check(&ringbuf->map, map, 0, 0, page_size));
return 1;
}
@@ -690,5 +700,4 @@ int cg_skb(void *ctx)
return 1;
}
-__u32 _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
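The two changes above that defer sizing to userspace (the ring buffer losing its fixed max_entries and the new page_size global) imply matching setup on the loader side. A hedged sketch of what that might look like, assuming a skeleton named map_ptr_kern.skel.h (illustrative; the actual test harness lives outside this diff):

#include <unistd.h>
#include <bpf/libbpf.h>
#include "map_ptr_kern.skel.h"

static struct map_ptr_kern *open_and_prepare(void)
{
	struct map_ptr_kern *skel;
	long page_size = sysconf(_SC_PAGESIZE);

	skel = map_ptr_kern__open();
	if (!skel)
		return NULL;

	/* the ringbuf definition above no longer fixes max_entries */
	if (bpf_map__set_max_entries(skel->maps.m_ringbuf, page_size))
		goto err;
	if (map_ptr_kern__load(skel))
		goto err;

	/* "userspace should set it" */
	skel->bss->page_size = page_size;
	return skel;
err:
	map_ptr_kern__destroy(skel);
	return NULL;
}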
diff --git a/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c b/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c
new file mode 100644
index 000000000000..4f94c971ae86
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+SEC("tp_btf/sys_enter")
+__success
+__log_level(2)
+__msg("r8 = *(u64 *)(r7 +0) ; R7_w=ptr_nameidata(off={{[0-9]+}}) R8_w=rdonly_untrusted_mem(sz=0)")
+__msg("r9 = *(u8 *)(r8 +0) ; R8_w=rdonly_untrusted_mem(sz=0) R9_w=scalar")
+int btf_id_to_ptr_mem(void *ctx)
+{
+ struct task_struct *task;
+ struct nameidata *idata;
+ u64 ret, off;
+
+ task = bpf_get_current_task_btf();
+ idata = task->nameidata;
+ off = bpf_core_field_offset(struct nameidata, pathname);
+ /*
+ * asm block to provide a reliable match target for __msg; equivalent of:
+ * ret = task->nameidata->pathname[0];
+ */
+ asm volatile (
+ "r7 = %[idata];"
+ "r7 += %[off];"
+ "r8 = *(u64 *)(r7 + 0);"
+ "r9 = *(u8 *)(r8 + 0);"
+ "%[ret] = r9;"
+ : [ret]"=r"(ret)
+ : [idata]"r"(idata),
+ [off]"r"(off)
+ : "r7", "r8", "r9");
+ return ret;
+}
+
+SEC("socket")
+__success
+__retval(0)
+int ldx_is_ok_bad_addr(void *ctx)
+{
+ char *p;
+
+ if (!bpf_core_enum_value_exists(enum bpf_features, BPF_FEAT_RDONLY_CAST_TO_VOID))
+ return 42;
+
+ p = bpf_rdonly_cast(0, 0);
+ return p[0x7fff];
+}
+
+SEC("socket")
+__success
+__retval(1)
+int ldx_is_ok_good_addr(void *ctx)
+{
+ int v, *p;
+
+ v = 1;
+ p = bpf_rdonly_cast(&v, 0);
+ return *p;
+}
+
+SEC("socket")
+__success
+int offset_not_tracked(void *ctx)
+{
+ int *p, i, s;
+
+ p = bpf_rdonly_cast(0, 0);
+ s = 0;
+ bpf_for(i, 0, 1000 * 1000 * 1000) {
+ p++;
+ s += *p;
+ }
+ return s;
+}
+
+SEC("socket")
+__failure
+__msg("cannot write into rdonly_untrusted_mem")
+int stx_not_ok(void *ctx)
+{
+ int v, *p;
+
+ v = 1;
+ p = bpf_rdonly_cast(&v, 0);
+ *p = 1;
+ return 0;
+}
+
+SEC("socket")
+__failure
+__msg("cannot write into rdonly_untrusted_mem")
+int atomic_not_ok(void *ctx)
+{
+ int v, *p;
+
+ v = 1;
+ p = bpf_rdonly_cast(&v, 0);
+ __sync_fetch_and_add(p, 1);
+ return 0;
+}
+
+SEC("socket")
+__failure
+__msg("cannot write into rdonly_untrusted_mem")
+int atomic_rmw_not_ok(void *ctx)
+{
+ long v, *p;
+
+ v = 1;
+ p = bpf_rdonly_cast(&v, 0);
+ return __sync_val_compare_and_swap(p, 0, 42);
+}
+
+SEC("socket")
+__failure
+__msg("invalid access to memory, mem_size=0 off=0 size=4")
+__msg("R1 min value is outside of the allowed memory range")
+int kfunc_param_not_ok(void *ctx)
+{
+ int *p;
+
+ p = bpf_rdonly_cast(0, 0);
+ bpf_kfunc_trusted_num_test(p);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure
+__msg("R1 type=rdonly_untrusted_mem expected=")
+int helper_param_not_ok(void *ctx)
+{
+ char *p;
+
+ p = bpf_rdonly_cast(0, 0);
+ /*
+ * Any helper with an ARG_CONST_SIZE_OR_ZERO constraint will do;
+ * it is the most permissive constraint.
+ */
+ bpf_copy_from_user(p, 0, (void *)42);
+ return 0;
+}
+
+static __noinline u64 *get_some_addr(void)
+{
+ if (bpf_get_prandom_u32())
+ return bpf_rdonly_cast(0, bpf_core_type_id_kernel(struct sock));
+ else
+ return bpf_rdonly_cast(0, 0);
+}
+
+SEC("socket")
+__success
+__retval(0)
+int mixed_mem_type(void *ctx)
+{
+ u64 *p;
+
+ /* Use a __noinline func to keep the compiler from hoisting the load into the if branches. */
+ p = get_some_addr();
+ return *p;
+}
+
+__attribute__((__aligned__(8)))
+u8 global[] = {
+ 0x11, 0x22, 0x33, 0x44,
+ 0x55, 0x66, 0x77, 0x88,
+ 0x99
+};
+
+__always_inline
+static u64 combine(void *p)
+{
+ u64 acc;
+
+ acc = 0;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ acc |= (*(u64 *)p >> 56) << 24;
+ acc |= (*(u32 *)p >> 24) << 16;
+ acc |= (*(u16 *)p >> 8) << 8;
+ acc |= *(u8 *)p;
+#else
+ acc |= (*(u64 *)p & 0xff) << 24;
+ acc |= (*(u32 *)p & 0xff) << 16;
+ acc |= (*(u16 *)p & 0xff) << 8;
+ acc |= *(u8 *)p;
+#endif
+ return acc;
+}
+
+SEC("socket")
+__retval(0x88442211)
+int diff_size_access(void *ctx)
+{
+ return combine(bpf_rdonly_cast(&global, 0));
+}
+
+SEC("socket")
+__retval(0x99553322)
+int misaligned_access(void *ctx)
+{
+ return combine(bpf_rdonly_cast(&global, 0) + 1);
+}
+
+__weak int return_one(void)
+{
+ return 1;
+}
+
+SEC("socket")
+__success
+__retval(1)
+int null_check(void *ctx)
+{
+ int *p;
+
+ p = bpf_rdonly_cast(0, 0);
+ if (p == 0)
+ /* make this a function call to avoid compiler
+ * moving r0 assignment before check.
+ */
+ return return_one();
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
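Outside of these positive and negative checks, the same facility is usually reached through the bpf_core_cast() wrapper from bpf_core_read.h, which supplies a CO-RE type id to bpf_rdonly_cast() and yields a read-only pointer without a probe read. A small hedged sketch (program and function names are illustrative; the net_timestamping.c program added later in this diff uses the identical head + end expression):

// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

SEC("tp_btf/tcp_probe")
int BPF_PROG(read_shinfo_sketch, struct sock *sk, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;

	/* head + end locates skb_shared_info; the cast needs no probe read */
	shinfo = bpf_core_cast(skb->head + skb->end, struct skb_shared_info);
	return shinfo->tskey != 0;
}

char _license[] SEC("license") = "GPL";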
diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe.c b/tools/testing/selftests/bpf/progs/missed_kprobe.c
new file mode 100644
index 000000000000..51a4fe64c917
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/missed_kprobe.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+/*
+ * No tests in here, just to trigger 'bpf_fentry_test*'
+ * through tracing test_run
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe/bpf_fentry_test1")
+int test1(struct pt_regs *ctx)
+{
+ bpf_kfunc_common_test();
+ return 0;
+}
+
+SEC("kprobe/bpf_kfunc_common_test")
+int test2(struct pt_regs *ctx)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c
new file mode 100644
index 000000000000..29c18d869ec1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+/*
+ * No tests in here, just to trigger 'bpf_fentry_test*'
+ * through tracing test_run
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe.multi/bpf_fentry_test1")
+int test1(struct pt_regs *ctx)
+{
+ bpf_kfunc_common_test();
+ return 0;
+}
+
+SEC("kprobe/bpf_kfunc_common_test")
+int test2(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe/bpf_kfunc_common_test")
+int test3(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe/bpf_kfunc_common_test")
+int test4(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe.multi/bpf_kfunc_common_test")
+int test5(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe.session/bpf_kfunc_common_test")
+int test6(struct pt_regs *ctx)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/missed_tp_recursion.c b/tools/testing/selftests/bpf/progs/missed_tp_recursion.c
new file mode 100644
index 000000000000..762385f827c5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/missed_tp_recursion.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+/*
+ * No tests in here, just to trigger 'bpf_fentry_test*'
+ * through tracing test_run
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe/bpf_fentry_test1")
+int test1(struct pt_regs *ctx)
+{
+ bpf_printk("test");
+ return 0;
+}
+
+SEC("tp/bpf_trace/bpf_trace_printk")
+int test2(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("tp/bpf_trace/bpf_trace_printk")
+int test3(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("tp/bpf_trace/bpf_trace_printk")
+int test4(struct pt_regs *ctx)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/mmap_inner_array.c b/tools/testing/selftests/bpf/progs/mmap_inner_array.c
new file mode 100644
index 000000000000..90aacbc2938a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mmap_inner_array.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct inner_array_type {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __type(key, __u32);
+ __type(value, __u64);
+ __uint(max_entries, 1);
+} inner_array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(key_size, 4);
+ __uint(value_size, 4);
+ __uint(max_entries, 1);
+ __array(values, struct inner_array_type);
+} outer_map SEC(".maps");
+
+int pid = 0;
+__u64 match_value = 0x13572468;
+bool done = false;
+bool pid_match = false;
+bool outer_map_match = false;
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int add_to_list_in_inner_array(void *ctx)
+{
+ __u32 curr_pid, zero = 0;
+ struct bpf_map *map;
+ __u64 *value;
+
+ curr_pid = (u32)bpf_get_current_pid_tgid();
+ if (done || curr_pid != pid)
+ return 0;
+
+ pid_match = true;
+ map = bpf_map_lookup_elem(&outer_map, &curr_pid);
+ if (!map)
+ return 0;
+
+ outer_map_match = true;
+ value = bpf_map_lookup_elem(map, &zero);
+ if (!value)
+ return 0;
+
+ *value = match_value;
+ done = true;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/modify_return.c b/tools/testing/selftests/bpf/progs/modify_return.c
index 8b7466a15c6b..3376d4849f58 100644
--- a/tools/testing/selftests/bpf/progs/modify_return.c
+++ b/tools/testing/selftests/bpf/progs/modify_return.c
@@ -47,3 +47,43 @@ int BPF_PROG(fexit_test, int a, __u64 b, int ret)
return 0;
}
+
+static int sequence2;
+
+__u64 fentry_result2 = 0;
+SEC("fentry/bpf_modify_return_test2")
+int BPF_PROG(fentry_test2, int a, int *b, short c, int d, void *e, char f,
+ int g)
+{
+ sequence2++;
+ fentry_result2 = (sequence2 == 1);
+ return 0;
+}
+
+__u64 fmod_ret_result2 = 0;
+SEC("fmod_ret/bpf_modify_return_test2")
+int BPF_PROG(fmod_ret_test2, int a, int *b, short c, int d, void *e, char f,
+ int g, int ret)
+{
+ sequence2++;
+ /* This is the first fmod_ret program, so the ret passed in should be 0 */
+ fmod_ret_result2 = (sequence2 == 2 && ret == 0);
+ return input_retval;
+}
+
+__u64 fexit_result2 = 0;
+SEC("fexit/bpf_modify_return_test2")
+int BPF_PROG(fexit_test2, int a, int *b, short c, int d, void *e, char f,
+ int g, int ret)
+{
+ sequence2++;
+ /* If input_retval is non-zero, a successful modification should have
+ * occurred.
+ */
+ if (input_retval)
+ fexit_result2 = (sequence2 == 3 && ret == input_retval);
+ else
+ fexit_result2 = (sequence2 == 3 && ret == 29);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf.h b/tools/testing/selftests/bpf/progs/mptcp_bpf.h
new file mode 100644
index 000000000000..3b188ccdcc40
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __MPTCP_BPF_H__
+#define __MPTCP_BPF_H__
+
+#include "bpf_experimental.h"
+
+/* list helpers from include/linux/list.h */
+static inline int list_is_head(const struct list_head *list,
+ const struct list_head *head)
+{
+ return list == head;
+}
+
+#define list_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+#define list_first_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+#define list_next_entry(pos, member) \
+ list_entry((pos)->member.next, typeof(*(pos)), member)
+
+#define list_entry_is_head(pos, head, member) \
+ list_is_head(&pos->member, (head))
+
+/* small difference: 'can_loop' has been added to the loop condition */
+#define list_for_each_entry(pos, head, member) \
+ for (pos = list_first_entry(head, typeof(*pos), member); \
+ !list_entry_is_head(pos, head, member) && can_loop; \
+ pos = list_next_entry(pos, member))
+
+/* mptcp helpers from protocol.h */
+#define mptcp_for_each_subflow(__msk, __subflow) \
+ list_for_each_entry(__subflow, &((__msk)->conn_list), node)
+
+static __always_inline struct sock *
+mptcp_subflow_tcp_sock(const struct mptcp_subflow_context *subflow)
+{
+ return subflow->tcp_sock;
+}
+
+#endif
diff --git a/tools/testing/selftests/bpf/progs/mptcp_sock.c b/tools/testing/selftests/bpf/progs/mptcp_sock.c
new file mode 100644
index 000000000000..f3acb90588c7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_sock.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Tessares SA. */
+/* Copyright (c) 2022, SUSE. */
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+__u32 token = 0;
+
+struct mptcp_storage {
+ __u32 invoked;
+ __u32 is_mptcp;
+ struct sock *sk;
+ __u32 token;
+ struct sock *first;
+ char ca_name[TCP_CA_NAME_MAX];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct mptcp_storage);
+} socket_storage_map SEC(".maps");
+
+SEC("sockops")
+int _sockops(struct bpf_sock_ops *ctx)
+{
+ struct mptcp_storage *storage;
+ struct mptcp_sock *msk;
+ int op = (int)ctx->op;
+ struct tcp_sock *tsk;
+ struct bpf_sock *sk;
+ bool is_mptcp;
+
+ if (op != BPF_SOCK_OPS_TCP_CONNECT_CB)
+ return 1;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 1;
+
+ tsk = bpf_skc_to_tcp_sock(sk);
+ if (!tsk)
+ return 1;
+
+ is_mptcp = bpf_core_field_exists(tsk->is_mptcp) ? tsk->is_mptcp : 0;
+ if (!is_mptcp) {
+ storage = bpf_sk_storage_get(&socket_storage_map, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 1;
+
+ storage->token = 0;
+ __builtin_memset(storage->ca_name, 0, TCP_CA_NAME_MAX);
+ storage->first = NULL;
+ } else {
+ msk = bpf_skc_to_mptcp_sock(sk);
+ if (!msk)
+ return 1;
+
+ storage = bpf_sk_storage_get(&socket_storage_map, msk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 1;
+
+ storage->token = msk->token;
+ __builtin_memcpy(storage->ca_name, msk->ca_name, TCP_CA_NAME_MAX);
+ storage->first = msk->first;
+ }
+ storage->invoked++;
+ storage->is_mptcp = is_mptcp;
+ storage->sk = (struct sock *)sk;
+
+ return 1;
+}
+
+SEC("fentry/mptcp_pm_new_connection")
+int BPF_PROG(trace_mptcp_pm_new_connection, struct mptcp_sock *msk,
+ const struct sock *ssk, int server_side)
+{
+ if (!server_side)
+ token = msk->token;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/mptcp_subflow.c b/tools/testing/selftests/bpf/progs/mptcp_subflow.c
new file mode 100644
index 000000000000..70302477e326
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_subflow.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Tessares SA. */
+/* Copyright (c) 2024, Kylin Software */
+
+/* vmlinux.h, bpf_helpers.h and other defines */
+#include "bpf_tracing_net.h"
+#include "mptcp_bpf.h"
+
+char _license[] SEC("license") = "GPL";
+
+char cc[TCP_CA_NAME_MAX] = "reno";
+int pid;
+
+/* Associate a subflow counter to each token */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+ __uint(max_entries, 100);
+} mptcp_sf SEC(".maps");
+
+SEC("sockops")
+int mptcp_subflow(struct bpf_sock_ops *skops)
+{
+ __u32 init = 1, key, mark, *cnt;
+ struct mptcp_sock *msk;
+ struct bpf_sock *sk;
+ int err;
+
+ if (skops->op != BPF_SOCK_OPS_TCP_CONNECT_CB)
+ return 1;
+
+ sk = skops->sk;
+ if (!sk)
+ return 1;
+
+ msk = bpf_skc_to_mptcp_sock(sk);
+ if (!msk)
+ return 1;
+
+ key = msk->token;
+ cnt = bpf_map_lookup_elem(&mptcp_sf, &key);
+ if (cnt) {
+ /* A new subflow is added to an existing MPTCP connection */
+ __sync_fetch_and_add(cnt, 1);
+ mark = *cnt;
+ } else {
+ /* A new MPTCP connection has just been initiated and this is its primary subflow */
+ bpf_map_update_elem(&mptcp_sf, &key, &init, BPF_ANY);
+ mark = init;
+ }
+
+ /* Set the mark of the subflow's socket based on appearance order */
+ err = bpf_setsockopt(skops, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
+ if (err < 0)
+ return 1;
+ if (mark == 2)
+ err = bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION, cc, TCP_CA_NAME_MAX);
+
+ return 1;
+}
+
+static int _check_getsockopt_subflow_mark(struct mptcp_sock *msk, struct bpf_sockopt *ctx)
+{
+ struct mptcp_subflow_context *subflow;
+ int i = 0;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk;
+
+ ssk = mptcp_subflow_tcp_sock(bpf_core_cast(subflow,
+ struct mptcp_subflow_context));
+
+ if (ssk->sk_mark != ++i) {
+ ctx->retval = -2;
+ break;
+ }
+ }
+
+ return 1;
+}
+
+static int _check_getsockopt_subflow_cc(struct mptcp_sock *msk, struct bpf_sockopt *ctx)
+{
+ struct mptcp_subflow_context *subflow;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct inet_connection_sock *icsk;
+ struct sock *ssk;
+
+ ssk = mptcp_subflow_tcp_sock(bpf_core_cast(subflow,
+ struct mptcp_subflow_context));
+ icsk = bpf_core_cast(ssk, struct inet_connection_sock);
+
+ if (ssk->sk_mark == 2 &&
+ __builtin_memcmp(icsk->icsk_ca_ops->name, cc, TCP_CA_NAME_MAX)) {
+ ctx->retval = -2;
+ break;
+ }
+ }
+
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int _getsockopt_subflow(struct bpf_sockopt *ctx)
+{
+ struct bpf_sock *sk = ctx->sk;
+ struct mptcp_sock *msk;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 1;
+
+ if (!sk || sk->protocol != IPPROTO_MPTCP ||
+ (!(ctx->level == SOL_SOCKET && ctx->optname == SO_MARK) &&
+ !(ctx->level == SOL_TCP && ctx->optname == TCP_CONGESTION)))
+ return 1;
+
+ msk = bpf_core_cast(sk, struct mptcp_sock);
+ if (msk->pm.subflows != 1) {
+ ctx->retval = -1;
+ return 1;
+ }
+
+ if (ctx->optname == SO_MARK)
+ return _check_getsockopt_subflow_mark(msk, ctx);
+ return _check_getsockopt_subflow_cc(msk, ctx);
+}
diff --git a/tools/testing/selftests/bpf/progs/mptcpify.c b/tools/testing/selftests/bpf/progs/mptcpify.c
new file mode 100644
index 000000000000..cbdc730c3a47
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcpify.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023, SUSE. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include "bpf_tracing_net.h"
+
+char _license[] SEC("license") = "GPL";
+int pid;
+
+SEC("fmod_ret/update_socket_protocol")
+int BPF_PROG(mptcpify, int family, int type, int protocol)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return protocol;
+
+ if ((family == AF_INET || family == AF_INET6) &&
+ type == SOCK_STREAM &&
+ (!protocol || protocol == IPPROTO_TCP)) {
+ return IPPROTO_MPTCP;
+ }
+
+ return protocol;
+}
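A hypothetical userspace check (not part of this patch) would attach the program, record its own pid, create an ordinary TCP socket, and confirm that the socket now reports the MPTCP protocol; the selftest runner does something along these lines. A sketch, assuming a skeleton named mptcpify.skel.h and that SO_PROTOCOL and IPPROTO_MPTCP (262) are available in the system headers:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "mptcpify.skel.h"

int main(void)
{
	struct mptcpify *skel;
	socklen_t len;
	int fd = -1, proto = 0, err = 1;

	skel = mptcpify__open_and_load();
	if (!skel)
		return 1;
	skel->bss->pid = getpid();
	if (mptcpify__attach(skel))
		goto out;

	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0)
		goto out;
	len = sizeof(proto);
	if (!getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &proto, &len))
		printf("protocol: %d (IPPROTO_MPTCP is 262)\n", proto);
	err = (proto == 262) ? 0 : 1;
	close(fd);
out:
	mptcpify__destroy(skel);
	return err;
}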
diff --git a/tools/testing/selftests/bpf/progs/nested_acquire.c b/tools/testing/selftests/bpf/progs/nested_acquire.c
new file mode 100644
index 000000000000..49ad7b9adf56
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/nested_acquire.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("tp_btf/tcp_probe")
+__success
+int BPF_PROG(test_nested_acquire_nonzero, struct sock *sk, struct sk_buff *skb)
+{
+ struct sk_buff *ptr;
+
+ ptr = bpf_kfunc_nested_acquire_nonzero_offset_test(&sk->sk_write_queue);
+
+ bpf_kfunc_nested_release_test(ptr);
+ return 0;
+}
+
+SEC("tp_btf/tcp_probe")
+__success
+int BPF_PROG(test_nested_acquire_zero, struct sock *sk, struct sk_buff *skb)
+{
+ struct sk_buff *ptr;
+
+ ptr = bpf_kfunc_nested_acquire_zero_offset_test(&sk->__sk_common);
+
+ bpf_kfunc_nested_release_test(ptr);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/nested_trust_common.h b/tools/testing/selftests/bpf/progs/nested_trust_common.h
new file mode 100644
index 000000000000..1784b496be2e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/nested_trust_common.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _NESTED_TRUST_COMMON_H
+#define _NESTED_TRUST_COMMON_H
+
+#include <stdbool.h>
+
+bool bpf_cpumask_test_cpu(unsigned int cpu, const struct cpumask *cpumask) __ksym;
+__u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
+
+#endif /* _NESTED_TRUST_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/nested_trust_failure.c b/tools/testing/selftests/bpf/progs/nested_trust_failure.c
new file mode 100644
index 000000000000..3568ec450100
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/nested_trust_failure.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#include "nested_trust_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, u64);
+} sk_storage_map SEC(".maps");
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(task_newtask,
+ * TP_PROTO(struct task_struct *p, u64 clone_flags)
+ */
+
+SEC("tp_btf/task_newtask")
+__failure __msg("R2 must be")
+int BPF_PROG(test_invalid_nested_user_cpus, struct task_struct *task, u64 clone_flags)
+{
+ bpf_cpumask_test_cpu(0, task->user_cpus_ptr);
+ return 0;
+}
+
+/* Although R2 is of type sk_buff while sock_common is expected, we hit the untrusted ptr check first. */
+SEC("tp_btf/tcp_probe")
+__failure __msg("R2 type=untrusted_ptr_ expected=ptr_, trusted_ptr_, rcu_ptr_")
+int BPF_PROG(test_invalid_skb_field, struct sock *sk, struct sk_buff *skb)
+{
+ bpf_sk_storage_get(&sk_storage_map, skb->next, 0, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/nested_trust_success.c b/tools/testing/selftests/bpf/progs/nested_trust_success.c
new file mode 100644
index 000000000000..2b66953ca82e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/nested_trust_success.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#include "nested_trust_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, u64);
+} sk_storage_map SEC(".maps");
+
+SEC("tp_btf/task_newtask")
+__success
+int BPF_PROG(test_read_cpumask, struct task_struct *task, u64 clone_flags)
+{
+ bpf_cpumask_test_cpu(0, task->cpus_ptr);
+ return 0;
+}
+
+SEC("tp_btf/tcp_probe")
+__success
+int BPF_PROG(test_skb_field, struct sock *sk, struct sk_buff *skb)
+{
+ bpf_sk_storage_get(&sk_storage_map, skb->sk, 0, 0);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__success
+int BPF_PROG(test_nested_offset, struct task_struct *task, u64 clone_flags)
+{
+ bpf_cpumask_first_zero(&task->cpus_mask);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/net_timestamping.c b/tools/testing/selftests/bpf/progs/net_timestamping.c
new file mode 100644
index 000000000000..b4c2f0f2be11
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/net_timestamping.c
@@ -0,0 +1,248 @@
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+#include <errno.h>
+
+__u32 monitored_pid = 0;
+
+int nr_active;
+int nr_snd;
+int nr_passive;
+int nr_sched;
+int nr_txsw;
+int nr_ack;
+
+struct sk_stg {
+ __u64 sendmsg_ns; /* record ts when sendmsg is called */
+};
+
+struct sk_tskey {
+ u64 cookie;
+ u32 tskey;
+};
+
+struct delay_info {
+ u64 sendmsg_ns; /* record ts when sendmsg is called */
+ u32 sched_delay; /* SCHED_CB - sendmsg_ns */
+ u32 snd_sw_delay; /* SND_SW_CB - SCHED_CB */
+ u32 ack_delay; /* ACK_CB - SND_SW_CB */
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct sk_stg);
+} sk_stg_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct sk_tskey);
+ __type(value, struct delay_info);
+ __uint(max_entries, 1024);
+} time_map SEC(".maps");
+
+static u64 delay_tolerance_nsec = 10000000000; /* 10 second as an example */
+
+extern int bpf_sock_ops_enable_tx_tstamp(struct bpf_sock_ops_kern *skops, u64 flags) __ksym;
+
+static int bpf_test_sockopt(void *ctx, const struct sock *sk, int expected)
+{
+ int tmp, new = SK_BPF_CB_TX_TIMESTAMPING;
+ int opt = SK_BPF_CB_FLAGS;
+ int level = SOL_SOCKET;
+
+ if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new)) != expected)
+ return 1;
+
+ if (bpf_getsockopt(ctx, level, opt, &tmp, sizeof(tmp)) != expected ||
+ (!expected && tmp != new))
+ return 1;
+
+ return 0;
+}
+
+static bool bpf_test_access_sockopt(void *ctx, const struct sock *sk)
+{
+ if (bpf_test_sockopt(ctx, sk, -EOPNOTSUPP))
+ return true;
+ return false;
+}
+
+static bool bpf_test_access_load_hdr_opt(struct bpf_sock_ops *skops)
+{
+ u8 opt[3] = {0};
+ int load_flags = 0;
+ int ret;
+
+ ret = bpf_load_hdr_opt(skops, opt, sizeof(opt), load_flags);
+ if (ret != -EOPNOTSUPP)
+ return true;
+
+ return false;
+}
+
+static bool bpf_test_access_cb_flags_set(struct bpf_sock_ops *skops)
+{
+ int ret;
+
+ ret = bpf_sock_ops_cb_flags_set(skops, 0);
+ if (ret != -EOPNOTSUPP)
+ return true;
+
+ return false;
+}
+
+/* In the timestamping callbacks, we're not allowed to make the following
+ * BPF calls for safety reasons. Return false if they all fail as expected.
+ */
+static bool bpf_test_access_bpf_calls(struct bpf_sock_ops *skops,
+ const struct sock *sk)
+{
+ if (bpf_test_access_sockopt(skops, sk))
+ return true;
+
+ if (bpf_test_access_load_hdr_opt(skops))
+ return true;
+
+ if (bpf_test_access_cb_flags_set(skops))
+ return true;
+
+ return false;
+}
+
+static bool bpf_test_delay(struct bpf_sock_ops *skops, const struct sock *sk)
+{
+ struct bpf_sock_ops_kern *skops_kern;
+ u64 timestamp = bpf_ktime_get_ns();
+ struct skb_shared_info *shinfo;
+ struct delay_info dinfo = {0};
+ struct sk_tskey key = {0};
+ struct delay_info *val;
+ struct sk_buff *skb;
+ struct sk_stg *stg;
+ u64 prior_ts, delay;
+
+ if (bpf_test_access_bpf_calls(skops, sk))
+ return false;
+
+ skops_kern = bpf_cast_to_kern_ctx(skops);
+ skb = skops_kern->skb;
+ shinfo = bpf_core_cast(skb->head + skb->end, struct skb_shared_info);
+
+ key.cookie = bpf_get_socket_cookie(skops);
+ if (!key.cookie)
+ return false;
+
+ if (skops->op == BPF_SOCK_OPS_TSTAMP_SENDMSG_CB) {
+ stg = bpf_sk_storage_get(&sk_stg_map, (void *)sk, 0, 0);
+ if (!stg)
+ return false;
+ dinfo.sendmsg_ns = stg->sendmsg_ns;
+ bpf_sock_ops_enable_tx_tstamp(skops_kern, 0);
+ key.tskey = shinfo->tskey;
+ if (!key.tskey)
+ return false;
+ bpf_map_update_elem(&time_map, &key, &dinfo, BPF_ANY);
+ return true;
+ }
+
+ key.tskey = shinfo->tskey;
+ if (!key.tskey)
+ return false;
+
+ val = bpf_map_lookup_elem(&time_map, &key);
+ if (!val)
+ return false;
+
+ switch (skops->op) {
+ case BPF_SOCK_OPS_TSTAMP_SCHED_CB:
+ val->sched_delay = timestamp - val->sendmsg_ns;
+ delay = val->sched_delay;
+ break;
+ case BPF_SOCK_OPS_TSTAMP_SND_SW_CB:
+ prior_ts = val->sched_delay + val->sendmsg_ns;
+ val->snd_sw_delay = timestamp - prior_ts;
+ delay = val->snd_sw_delay;
+ break;
+ case BPF_SOCK_OPS_TSTAMP_ACK_CB:
+ prior_ts = val->snd_sw_delay + val->sched_delay + val->sendmsg_ns;
+ val->ack_delay = timestamp - prior_ts;
+ delay = val->ack_delay;
+ break;
+ }
+
+ if (delay >= delay_tolerance_nsec)
+ return false;
+
+ /* Since it's the last one, remove from the map after latency check */
+ if (skops->op == BPF_SOCK_OPS_TSTAMP_ACK_CB)
+ bpf_map_delete_elem(&time_map, &key);
+
+ return true;
+}
+
+SEC("fentry/tcp_sendmsg_locked")
+int BPF_PROG(trace_tcp_sendmsg_locked, struct sock *sk, struct msghdr *msg,
+ size_t size)
+{
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
+ u64 timestamp = bpf_ktime_get_ns();
+ u32 flag = sk->sk_bpf_cb_flags;
+ struct sk_stg *stg;
+
+ if (pid != monitored_pid || !flag)
+ return 0;
+
+ stg = bpf_sk_storage_get(&sk_stg_map, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!stg)
+ return 0;
+
+ stg->sendmsg_ns = timestamp;
+ nr_snd += 1;
+ return 0;
+}
+
+SEC("sockops")
+int skops_sockopt(struct bpf_sock_ops *skops)
+{
+ struct bpf_sock *bpf_sk = skops->sk;
+ const struct sock *sk;
+
+ if (!bpf_sk)
+ return 1;
+
+ sk = (struct sock *)bpf_skc_to_tcp_sock(bpf_sk);
+ if (!sk)
+ return 1;
+
+ switch (skops->op) {
+ case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
+ nr_active += !bpf_test_sockopt(skops, sk, 0);
+ break;
+ case BPF_SOCK_OPS_TSTAMP_SENDMSG_CB:
+ if (bpf_test_delay(skops, sk))
+ nr_snd += 1;
+ break;
+ case BPF_SOCK_OPS_TSTAMP_SCHED_CB:
+ if (bpf_test_delay(skops, sk))
+ nr_sched += 1;
+ break;
+ case BPF_SOCK_OPS_TSTAMP_SND_SW_CB:
+ if (bpf_test_delay(skops, sk))
+ nr_txsw += 1;
+ break;
+ case BPF_SOCK_OPS_TSTAMP_ACK_CB:
+ if (bpf_test_delay(skops, sk))
+ nr_ack += 1;
+ break;
+ }
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/netcnt_prog.c b/tools/testing/selftests/bpf/progs/netcnt_prog.c
index d071adf178bd..f9ef8aee56f1 100644
--- a/tools/testing/selftests/bpf/progs/netcnt_prog.c
+++ b/tools/testing/selftests/bpf/progs/netcnt_prog.c
@@ -13,21 +13,20 @@
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
- __type(value, struct percpu_net_cnt);
+ __type(value, union percpu_net_cnt);
} percpu_netcnt SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
- __type(value, struct net_cnt);
+ __type(value, union net_cnt);
} netcnt SEC(".maps");
SEC("cgroup/skb")
int bpf_nextcnt(struct __sk_buff *skb)
{
- struct percpu_net_cnt *percpu_cnt;
- char fmt[] = "%d %llu %llu\n";
- struct net_cnt *cnt;
+ union percpu_net_cnt *percpu_cnt;
+ union net_cnt *cnt;
__u64 ts, dt;
int ret;
@@ -68,4 +67,3 @@ int bpf_nextcnt(struct __sk_buff *skb)
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/tools/testing/selftests/bpf/progs/netif_receive_skb.c b/tools/testing/selftests/bpf/progs/netif_receive_skb.c
index 6b670039ea67..9e067dcbf607 100644
--- a/tools/testing/selftests/bpf/progs/netif_receive_skb.c
+++ b/tools/testing/selftests/bpf/progs/netif_receive_skb.c
@@ -5,6 +5,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
#include <errno.h>
@@ -16,8 +17,11 @@ bool skip = false;
#define STRSIZE 2048
#define EXPECTED_STRSIZE 256
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#if defined(bpf_target_s390)
+/* NULL points to a readable struct lowcore on s390, so take the last page */
+#define BADPTR ((void *)0xFFFFFFFFFFFFF000ULL)
+#else
+#define BADPTR 0
#endif
struct {
@@ -46,7 +50,6 @@ static int __strncmp(const void *m1, const void *m2, size_t len)
do { \
static const char _expectedval[EXPECTED_STRSIZE] = \
_expected; \
- static const char _ptrtype[64] = #_type; \
__u64 _hflags = _flags | BTF_F_COMPACT; \
static _type _ptrdata = __VA_ARGS__; \
static struct btf_ptr _ptr = { }; \
@@ -113,11 +116,11 @@ int BPF_PROG(trace_netif_receive_skb, struct sk_buff *skb)
}
/* Check invalid ptr value */
- p.ptr = 0;
+ p.ptr = BADPTR;
__ret = bpf_snprintf_btf(str, STRSIZE, &p, sizeof(p), 0);
if (__ret >= 0) {
- bpf_printk("printing NULL should generate error, got (%d)",
- __ret);
+ bpf_printk("printing %llx should generate error, got (%d)",
+ (unsigned long long)BADPTR, __ret);
ret = -ERANGE;
}
diff --git a/tools/testing/selftests/bpf/progs/netns_cookie_prog.c b/tools/testing/selftests/bpf/progs/netns_cookie_prog.c
new file mode 100644
index 000000000000..94040714af18
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/netns_cookie_prog.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+
+#define AF_INET6 10
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} sockops_netns_cookies SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} sk_msg_netns_cookies SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u64);
+} sock_map SEC(".maps");
+
+int tcx_init_netns_cookie, tcx_netns_cookie;
+int cgroup_skb_init_netns_cookie, cgroup_skb_netns_cookie;
+
+SEC("sockops")
+int get_netns_cookie_sockops(struct bpf_sock_ops *ctx)
+{
+ struct bpf_sock *sk = ctx->sk;
+ int *cookie;
+ __u32 key = 0;
+
+ if (ctx->family != AF_INET6)
+ return 1;
+
+ if (!sk)
+ return 1;
+
+ switch (ctx->op) {
+ case BPF_SOCK_OPS_TCP_CONNECT_CB:
+ cookie = bpf_sk_storage_get(&sockops_netns_cookies, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!cookie)
+ return 1;
+
+ *cookie = bpf_get_netns_cookie(ctx);
+ break;
+ case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
+ bpf_sock_map_update(ctx, &sock_map, &key, BPF_NOEXIST);
+ break;
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+SEC("sk_msg")
+int get_netns_cookie_sk_msg(struct sk_msg_md *msg)
+{
+ struct bpf_sock *sk = msg->sk;
+ int *cookie;
+
+ if (msg->family != AF_INET6)
+ return 1;
+
+ if (!sk)
+ return 1;
+
+ cookie = bpf_sk_storage_get(&sk_msg_netns_cookies, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!cookie)
+ return 1;
+
+ *cookie = bpf_get_netns_cookie(msg);
+
+ return 1;
+}
+
+SEC("tcx/ingress")
+int get_netns_cookie_tcx(struct __sk_buff *skb)
+{
+ tcx_init_netns_cookie = bpf_get_netns_cookie(NULL);
+ tcx_netns_cookie = bpf_get_netns_cookie(skb);
+ return TCX_PASS;
+}
+
+SEC("cgroup_skb/ingress")
+int get_netns_cookie_cgroup_skb(struct __sk_buff *skb)
+{
+ cgroup_skb_init_netns_cookie = bpf_get_netns_cookie(NULL);
+ cgroup_skb_netns_cookie = bpf_get_netns_cookie(skb);
+ return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/normal_map_btf.c b/tools/testing/selftests/bpf/progs/normal_map_btf.c
new file mode 100644
index 000000000000..a45c9299552c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/normal_map_btf.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct node_data {
+ __u64 data;
+ struct bpf_list_node node;
+};
+
+struct map_value {
+ struct bpf_list_head head __contains(node_data, node);
+ struct bpf_spin_lock lock;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} array SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+bool done = false;
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int add_to_list_in_array(void *ctx)
+{
+ struct map_value *value;
+ struct node_data *new;
+ int zero = 0;
+
+ if (done || (int)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+ value = bpf_map_lookup_elem(&array, &zero);
+ if (!value)
+ return 0;
+
+ new = bpf_obj_new(typeof(*new));
+ if (!new)
+ return 0;
+
+ bpf_spin_lock(&value->lock);
+ bpf_list_push_back(&value->head, &new->node);
+ bpf_spin_unlock(&value->lock);
+ done = true;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_array.c b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
new file mode 100644
index 000000000000..37c2d2608ec0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
@@ -0,0 +1,190 @@
+#include "bpf_experimental.h"
+
+struct val_t {
+ long b, c, d;
+};
+
+struct elem {
+ long sum;
+ struct val_t __percpu_kptr *pc;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} array SEC(".maps");
+
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+const volatile int nr_cpus;
+
+/* Initialize the percpu object */
+SEC("?fentry/bpf_fentry_test1")
+int BPF_PROG(test_array_map_1)
+{
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ return 0;
+}
+
+/* Update percpu data */
+SEC("?fentry/bpf_fentry_test2")
+int BPF_PROG(test_array_map_2)
+{
+ struct val_t __percpu_kptr *p;
+ struct val_t *v;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ v = bpf_per_cpu_ptr(p, 0);
+ if (!v)
+ return 0;
+ v->c = 1;
+ v->d = 2;
+
+ return 0;
+}
+
+int cpu0_field_d, sum_field_c;
+int my_pid;
+
+/* Summarize percpu data */
+SEC("?fentry/bpf_fentry_test3")
+int BPF_PROG(test_array_map_3)
+{
+ struct val_t __percpu_kptr *p;
+ int i, index = 0;
+ struct val_t *v;
+ struct elem *e;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
+ return 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ bpf_for(i, 0, nr_cpus) {
+ v = bpf_per_cpu_ptr(p, i);
+ if (v) {
+ if (i == 0)
+ cpu0_field_d = v->d;
+ sum_field_c += v->c;
+ }
+ }
+
+ return 0;
+}
+
+/* Explicitly free allocated percpu data */
+SEC("?fentry/bpf_fentry_test4")
+int BPF_PROG(test_array_map_4)
+{
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ /* delete */
+ p = bpf_kptr_xchg(&e->pc, NULL);
+ if (p) {
+ bpf_percpu_obj_drop(p);
+ }
+
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+int BPF_PROG(test_array_map_10)
+{
+ struct val_t __percpu_kptr *p, *p1;
+ int i, index = 0;
+ struct val_t *v;
+ struct elem *e;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
+ return 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
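+	/* Sleepable program: the percpu kptr loaded from the map is only
+	 * valid under RCU protection here. If the slot is empty, allocate
+	 * and publish a new object; if another program raced and installed
+	 * one first, drop our extra copy.
+	 */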
+ bpf_rcu_read_lock();
+ p = e->pc;
+ if (!p) {
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ goto out;
+
+ p1 = bpf_kptr_xchg(&e->pc, p);
+ if (p1) {
+			/* lost the race: another program installed an object first */

+ bpf_percpu_obj_drop(p1);
+ }
+ }
+
+ v = bpf_this_cpu_ptr(p);
+ v->c = 3;
+ v = bpf_this_cpu_ptr(p);
+ v->c = 0;
+
+ v = bpf_per_cpu_ptr(p, 0);
+ if (!v)
+ goto out;
+ v->c = 1;
+ v->d = 2;
+
+ /* delete */
+ p1 = bpf_kptr_xchg(&e->pc, NULL);
+ if (!p1)
+ goto out;
+
+ bpf_for(i, 0, nr_cpus) {
+ v = bpf_per_cpu_ptr(p, i);
+ if (v) {
+ if (i == 0)
+ cpu0_field_d = v->d;
+ sum_field_c += v->c;
+ }
+ }
+
+ /* finally release p */
+ bpf_percpu_obj_drop(p1);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c b/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
new file mode 100644
index 000000000000..a2acf9aa6c24
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
@@ -0,0 +1,109 @@
+#include "bpf_experimental.h"
+
+struct val_t {
+ long b, c, d;
+};
+
+struct elem {
+ long sum;
+ struct val_t __percpu_kptr *pc;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct elem);
+} cgrp SEC(".maps");
+
+const volatile int nr_cpus;
+
+/* Initialize the percpu object */
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test_cgrp_local_storage_1)
+{
+ struct task_struct *task;
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+
+ task = bpf_get_current_task_btf();
+ e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ return 0;
+}
+
+/* Percpu data collection */
+SEC("fentry/bpf_fentry_test2")
+int BPF_PROG(test_cgrp_local_storage_2)
+{
+ struct task_struct *task;
+ struct val_t __percpu_kptr *p;
+ struct val_t *v;
+ struct elem *e;
+
+ task = bpf_get_current_task_btf();
+ e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ v = bpf_per_cpu_ptr(p, 0);
+ if (!v)
+ return 0;
+ v->c = 1;
+ v->d = 2;
+ return 0;
+}
+
+int cpu0_field_d, sum_field_c;
+int my_pid;
+
+/* Summarize percpu data collection */
+SEC("fentry/bpf_fentry_test3")
+int BPF_PROG(test_cgrp_local_storage_3)
+{
+ struct task_struct *task;
+ struct val_t __percpu_kptr *p;
+ struct val_t *v;
+ struct elem *e;
+ int i;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
+ return 0;
+
+ task = bpf_get_current_task_btf();
+ e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ bpf_for(i, 0, nr_cpus) {
+ v = bpf_per_cpu_ptr(p, i);
+ if (v) {
+ if (i == 0)
+ cpu0_field_d = v->d;
+ sum_field_c += v->c;
+ }
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
new file mode 100644
index 000000000000..f2b8eb2ff76f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
@@ -0,0 +1,182 @@
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
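+/* Every program below is expected to be rejected by the verifier; the
+ * __msg() annotations pin the exact error produced by each misuse of
+ * percpu kptrs.
+ */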
+struct val_t {
+ long b, c, d;
+};
+
+struct val2_t {
+ long b;
+};
+
+struct val_with_ptr_t {
+ char *p;
+};
+
+struct val_with_rb_root_t {
+ struct bpf_spin_lock lock;
+};
+
+struct val_600b_t {
+ char b[600];
+};
+
+struct elem {
+ long sum;
+ struct val_t __percpu_kptr *pc;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} array SEC(".maps");
+
+long ret;
+
+SEC("?fentry/bpf_fentry_test1")
+__failure __msg("store to referenced kptr disallowed")
+int BPF_PROG(test_array_map_1)
+{
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ e->pc = (struct val_t __percpu_kptr *)ret;
+ return 0;
+}
+
+SEC("?fentry/bpf_fentry_test1")
+__failure __msg("invalid kptr access, R2 type=percpu_ptr_val2_t expected=ptr_val_t")
+int BPF_PROG(test_array_map_2)
+{
+ struct val2_t __percpu_kptr *p2;
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p2 = bpf_percpu_obj_new(struct val2_t);
+ if (!p2)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p2);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("R1 type=scalar expected=percpu_ptr_, percpu_rcu_ptr_, percpu_trusted_ptr_")
+int BPF_PROG(test_array_map_3)
+{
+ struct val_t __percpu_kptr *p, *p1;
+ struct val_t *v;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p1 = bpf_kptr_xchg(&e->pc, p);
+ if (p1)
+ bpf_percpu_obj_drop(p1);
+
+ v = bpf_this_cpu_ptr(p);
+ ret = v->b;
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("arg#0 expected for bpf_percpu_obj_drop_impl()")
+int BPF_PROG(test_array_map_4)
+{
+ struct val_t __percpu_kptr *p;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ bpf_obj_drop(p);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("arg#0 expected for bpf_obj_drop_impl()")
+int BPF_PROG(test_array_map_5)
+{
+ struct val_t *p;
+
+ p = bpf_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ bpf_percpu_obj_drop(p);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("bpf_percpu_obj_new type ID argument must be of a struct of scalars")
+int BPF_PROG(test_array_map_6)
+{
+ struct val_with_ptr_t __percpu_kptr *p;
+
+ p = bpf_percpu_obj_new(struct val_with_ptr_t);
+ if (!p)
+ return 0;
+
+ bpf_percpu_obj_drop(p);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("bpf_percpu_obj_new type ID argument must not contain special fields")
+int BPF_PROG(test_array_map_7)
+{
+ struct val_with_rb_root_t __percpu_kptr *p;
+
+ p = bpf_percpu_obj_new(struct val_with_rb_root_t);
+ if (!p)
+ return 0;
+
+ bpf_percpu_obj_drop(p);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("bpf_percpu_obj_new type size (600) is greater than 512")
+int BPF_PROG(test_array_map_8)
+{
+ struct val_600b_t __percpu_kptr *p;
+
+ p = bpf_percpu_obj_new(struct val_600b_t);
+ if (!p)
+ return 0;
+
+ bpf_percpu_obj_drop(p);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
index 25467d13c356..f793280a3238 100644
--- a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
+++ b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
@@ -11,8 +11,8 @@ typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, 16384);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(stack_trace_t));
+ __type(key, __u32);
+ __type(value, stack_trace_t);
} stackmap SEC(".maps");
struct {
@@ -35,10 +35,10 @@ int oncpu(void *ctx)
long val;
val = bpf_get_stackid(ctx, &stackmap, 0);
- if (val > 0)
+ if (val >= 0)
stackid_kernel = 2;
val = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);
- if (val > 0)
+ if (val >= 0)
stackid_user = 2;
trace = bpf_map_lookup_elem(&stackdata_map, &key);
diff --git a/tools/testing/selftests/bpf/progs/perfbuf_bench.c b/tools/testing/selftests/bpf/progs/perfbuf_bench.c
index e5ab4836a641..29c1639fc78a 100644
--- a/tools/testing/selftests/bpf/progs/perfbuf_bench.c
+++ b/tools/testing/selftests/bpf/progs/perfbuf_bench.c
@@ -4,6 +4,7 @@
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
@@ -18,10 +19,9 @@ const volatile int batch_cnt = 0;
long sample_val = 42;
long dropped __attribute__((aligned(128))) = 0;
-SEC("fentry/__x64_sys_getpgid")
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
int bench_perfbuf(void *ctx)
{
- __u64 *sample;
int i;
for (i = 0; i < batch_cnt; i++) {
diff --git a/tools/testing/selftests/bpf/progs/preempt_lock.c b/tools/testing/selftests/bpf/progs/preempt_lock.c
new file mode 100644
index 000000000000..7d04254e61f1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/preempt_lock.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+extern int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void *unsafe_ptr__ign, u64 flags) __weak __ksym;
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+int preempt_lock_missing_1(struct __sk_buff *ctx)
+{
+ bpf_preempt_disable();
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+int preempt_lock_missing_2(struct __sk_buff *ctx)
+{
+ bpf_preempt_disable();
+ bpf_preempt_disable();
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+int preempt_lock_missing_3(struct __sk_buff *ctx)
+{
+ bpf_preempt_disable();
+ bpf_preempt_disable();
+ bpf_preempt_disable();
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+int preempt_lock_missing_3_minus_2(struct __sk_buff *ctx)
+{
+ bpf_preempt_disable();
+ bpf_preempt_disable();
+ bpf_preempt_disable();
+ bpf_preempt_enable();
+ bpf_preempt_enable();
+ return 0;
+}
+
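+/* The verifier tracks the preempt-disable depth across static subprog
+ * calls, so wrapping the kfuncs in the helpers below does not hide an
+ * unbalanced bpf_preempt_disable() from the checks above.
+ */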
+static __noinline void preempt_disable(void)
+{
+ bpf_preempt_disable();
+}
+
+static __noinline void preempt_enable(void)
+{
+ bpf_preempt_enable();
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+int preempt_lock_missing_1_subprog(struct __sk_buff *ctx)
+{
+ preempt_disable();
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+int preempt_lock_missing_2_subprog(struct __sk_buff *ctx)
+{
+ preempt_disable();
+ preempt_disable();
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+int preempt_lock_missing_2_minus_1_subprog(struct __sk_buff *ctx)
+{
+ preempt_disable();
+ preempt_disable();
+ preempt_enable();
+ return 0;
+}
+
+static __noinline void preempt_balance_subprog(void)
+{
+ preempt_disable();
+ preempt_enable();
+}
+
+SEC("?tc")
+__success int preempt_balance(struct __sk_buff *ctx)
+{
+ bpf_guard_preempt();
+ return 0;
+}
+
+SEC("?tc")
+__success int preempt_balance_subprog_test(struct __sk_buff *ctx)
+{
+ preempt_balance_subprog();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("sleepable helper bpf_copy_from_user#")
+int preempt_sleepable_helper(void *ctx)
+{
+ u32 data;
+
+ bpf_preempt_disable();
+ bpf_copy_from_user(&data, sizeof(data), NULL);
+ bpf_preempt_enable();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("kernel func bpf_copy_from_user_str is sleepable within non-preemptible region")
+int preempt_sleepable_kfunc(void *ctx)
+{
+ u32 data;
+
+ bpf_preempt_disable();
+ bpf_copy_from_user_str(&data, sizeof(data), NULL, 0);
+ bpf_preempt_enable();
+ return 0;
+}
+
+int __noinline preempt_global_subprog(void)
+{
+ preempt_balance_subprog();
+ return 0;
+}
+
+SEC("?tc")
+__success
+int preempt_global_subprog_test(struct __sk_buff *ctx)
+{
+ preempt_disable();
+ preempt_global_subprog();
+ preempt_enable();
+ return 0;
+}
+
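+/* Global subprogs that may sleep, directly or through another global
+ * subprog, must not be called while preemption is disabled; each program
+ * below asserts the corresponding verifier rejection.
+ */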
+int __noinline
+global_subprog(int i)
+{
+ if (i)
+ bpf_printk("%p", &i);
+ return i;
+}
+
+int __noinline
+global_sleepable_helper_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user(&i, sizeof(i), NULL);
+ return i;
+}
+
+int __noinline
+global_sleepable_kfunc_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user_str(&i, sizeof(i), NULL, 0);
+ global_subprog(i);
+ return i;
+}
+
+int __noinline
+global_subprog_calling_sleepable_global(int i)
+{
+ if (!i)
+ global_sleepable_kfunc_subprog(i);
+ return i;
+}
+
+SEC("?syscall")
+__failure __msg("global functions that may sleep are not allowed in non-sleepable context")
+int preempt_global_sleepable_helper_subprog(struct __sk_buff *ctx)
+{
+ preempt_disable();
+ if (ctx->mark)
+ global_sleepable_helper_subprog(ctx->mark);
+ preempt_enable();
+ return 0;
+}
+
+SEC("?syscall")
+__failure __msg("global functions that may sleep are not allowed in non-sleepable context")
+int preempt_global_sleepable_kfunc_subprog(struct __sk_buff *ctx)
+{
+ preempt_disable();
+ if (ctx->mark)
+ global_sleepable_kfunc_subprog(ctx->mark);
+ preempt_enable();
+ return 0;
+}
+
+SEC("?syscall")
+__failure __msg("global functions that may sleep are not allowed in non-sleepable context")
+int preempt_global_sleepable_subprog_indirect(struct __sk_buff *ctx)
+{
+ preempt_disable();
+ if (ctx->mark)
+ global_subprog_calling_sleepable_global(ctx->mark);
+ preempt_enable();
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c b/tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c
new file mode 100644
index 000000000000..55907ef961bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_experimental.h"
+
+struct bin_data {
+ char data[256];
+ struct bpf_spin_lock lock;
+};
+
+struct map_value {
+ struct bin_data __kptr * data;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 2048);
+} array SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
+bool nomem_err = false;
+
+static int del_array(unsigned int i, int *from)
+{
+ struct map_value *value;
+ struct bin_data *old;
+
+ value = bpf_map_lookup_elem(&array, from);
+ if (!value)
+ return 1;
+
+ old = bpf_kptr_xchg(&value->data, NULL);
+ if (old)
+ bpf_obj_drop(old);
+
+ (*from)++;
+ return 0;
+}
+
+static int add_array(unsigned int i, int *from)
+{
+ struct bin_data *old, *new;
+ struct map_value *value;
+
+ value = bpf_map_lookup_elem(&array, from);
+ if (!value)
+ return 1;
+
+ new = bpf_obj_new(typeof(*new));
+ if (!new) {
+ nomem_err = true;
+ return 1;
+ }
+
+ old = bpf_kptr_xchg(&value->data, new);
+ if (old)
+ bpf_obj_drop(old);
+
+ (*from)++;
+ return 0;
+}
+
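+/* Free and then reallocate a 512-entry window of the array; the four
+ * fentry programs below each target a disjoint window of the 2048-entry
+ * map to stress the BPF memory allocator.
+ */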
+static void del_then_add_array(int from)
+{
+ int i;
+
+ i = from;
+ bpf_loop(512, del_array, &i, 0);
+
+ i = from;
+ bpf_loop(512, add_array, &i, 0);
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG2(test0, int, a)
+{
+ del_then_add_array(0);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test2")
+int BPF_PROG2(test1, int, a, u64, b)
+{
+ del_then_add_array(512);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test3")
+int BPF_PROG2(test2, char, a, int, b, u64, c)
+{
+ del_then_add_array(1024);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test4")
+int BPF_PROG2(test3, void *, a, char, b, int, c, u64, d)
+{
+ del_then_add_array(1536);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/prepare.c b/tools/testing/selftests/bpf/progs/prepare.c
new file mode 100644
index 000000000000..cfc1f48e0d28
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/prepare.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+int err;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096);
+} ringbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} array_map SEC(".maps");
+
+SEC("cgroup_skb/egress")
+int program(struct __sk_buff *skb)
+{
+ err = 0;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/priv_freplace_prog.c b/tools/testing/selftests/bpf/progs/priv_freplace_prog.c
new file mode 100644
index 000000000000..ccf1b04010ba
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/priv_freplace_prog.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("freplace/xdp_prog1")
+int new_xdp_prog2(struct xdp_md *xd)
+{
+ return XDP_DROP;
+}
diff --git a/tools/testing/selftests/bpf/progs/priv_map.c b/tools/testing/selftests/bpf/progs/priv_map.c
new file mode 100644
index 000000000000..9085be50f03b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/priv_map.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_QUEUE);
+ __uint(max_entries, 1);
+ __type(value, __u32);
+} priv_map SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/priv_prog.c b/tools/testing/selftests/bpf/progs/priv_prog.c
new file mode 100644
index 000000000000..725e29595079
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/priv_prog.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("xdp")
+int xdp_prog1(struct xdp_md *xdp)
+{
+ return XDP_DROP;
+}
diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue.c b/tools/testing/selftests/bpf/progs/pro_epilogue.c
new file mode 100644
index 000000000000..d97d6e07ef5c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pro_epilogue.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+void __kfunc_btf_root(void)
+{
+ bpf_kfunc_st_ops_inc10(NULL);
+}
+
+static __noinline __used int subprog(struct st_ops_args *args)
+{
+ args->a += 1;
+ return args->a;
+}
+
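+/* The __xlated() lines assert the instructions injected by the st_ops
+ * verifier: the prologue adds 1000 to the argument, the epilogue adds
+ * 10000 and doubles the return value, matching the __retval() sums in
+ * the syscall programs below.
+ */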
+__success
+/* prologue */
+__xlated("0: r6 = *(u64 *)(r1 +0)")
+__xlated("1: r7 = *(u64 *)(r6 +0)")
+__xlated("2: r7 += 1000")
+__xlated("3: *(u64 *)(r6 +0) = r7")
+/* main prog */
+__xlated("4: r1 = *(u64 *)(r1 +0)")
+__xlated("5: r6 = r1")
+__xlated("6: call kernel-function")
+__xlated("7: r1 = r6")
+__xlated("8: call pc+1")
+__xlated("9: exit")
+SEC("struct_ops/test_prologue")
+__naked int test_prologue(void)
+{
+ asm volatile (
+ "r1 = *(u64 *)(r1 +0);"
+ "r6 = r1;"
+ "call %[bpf_kfunc_st_ops_inc10];"
+ "r1 = r6;"
+ "call subprog;"
+ "exit;"
+ :
+ : __imm(bpf_kfunc_st_ops_inc10)
+ : __clobber_all);
+}
+
+__success
+/* save __u64 *ctx to stack */
+__xlated("0: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("1: r1 = *(u64 *)(r1 +0)")
+__xlated("2: r6 = r1")
+__xlated("3: call kernel-function")
+__xlated("4: r1 = r6")
+__xlated("5: call pc+")
+/* epilogue */
+__xlated("6: r1 = *(u64 *)(r10 -8)")
+__xlated("7: r1 = *(u64 *)(r1 +0)")
+__xlated("8: r6 = *(u64 *)(r1 +0)")
+__xlated("9: r6 += 10000")
+__xlated("10: *(u64 *)(r1 +0) = r6")
+__xlated("11: r0 = r6")
+__xlated("12: r0 *= 2")
+__xlated("13: exit")
+SEC("struct_ops/test_epilogue")
+__naked int test_epilogue(void)
+{
+ asm volatile (
+ "r1 = *(u64 *)(r1 +0);"
+ "r6 = r1;"
+ "call %[bpf_kfunc_st_ops_inc10];"
+ "r1 = r6;"
+ "call subprog;"
+ "exit;"
+ :
+ : __imm(bpf_kfunc_st_ops_inc10)
+ : __clobber_all);
+}
+
+__success
+/* prologue */
+__xlated("0: r6 = *(u64 *)(r1 +0)")
+__xlated("1: r7 = *(u64 *)(r6 +0)")
+__xlated("2: r7 += 1000")
+__xlated("3: *(u64 *)(r6 +0) = r7")
+/* save __u64 *ctx to stack */
+__xlated("4: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("5: r1 = *(u64 *)(r1 +0)")
+__xlated("6: r6 = r1")
+__xlated("7: call kernel-function")
+__xlated("8: r1 = r6")
+__xlated("9: call pc+")
+/* epilogue */
+__xlated("10: r1 = *(u64 *)(r10 -8)")
+__xlated("11: r1 = *(u64 *)(r1 +0)")
+__xlated("12: r6 = *(u64 *)(r1 +0)")
+__xlated("13: r6 += 10000")
+__xlated("14: *(u64 *)(r1 +0) = r6")
+__xlated("15: r0 = r6")
+__xlated("16: r0 *= 2")
+__xlated("17: exit")
+SEC("struct_ops/test_pro_epilogue")
+__naked int test_pro_epilogue(void)
+{
+ asm volatile (
+ "r1 = *(u64 *)(r1 +0);"
+ "r6 = r1;"
+ "call %[bpf_kfunc_st_ops_inc10];"
+ "r1 = r6;"
+ "call subprog;"
+ "exit;"
+ :
+ : __imm(bpf_kfunc_st_ops_inc10)
+ : __clobber_all);
+}
+
+SEC("syscall")
+__retval(1011) /* PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] */
+int syscall_prologue(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_prologue(&args);
+}
+
+SEC("syscall")
+__retval(20022) /* (KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
+int syscall_epilogue(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_epilogue(&args);
+}
+
+SEC("syscall")
+__retval(22022) /* (PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
+int syscall_pro_epilogue(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_pro_epilogue(&args);
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops pro_epilogue = {
+ .test_prologue = (void *)test_prologue,
+ .test_epilogue = (void *)test_epilogue,
+ .test_pro_epilogue = (void *)test_pro_epilogue,
+};
diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c
new file mode 100644
index 000000000000..6048d79be48b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__success
+/* prologue */
+__xlated("0: r6 = *(u64 *)(r1 +0)")
+__xlated("1: r7 = *(u64 *)(r6 +0)")
+__xlated("2: r7 += 1000")
+__xlated("3: *(u64 *)(r6 +0) = r7")
+/* main prog */
+__xlated("4: if r1 == 0x0 goto pc+5")
+__xlated("5: if r1 == 0x1 goto pc+2")
+__xlated("6: r1 = 1")
+__xlated("7: goto pc-3")
+__xlated("8: r1 = 0")
+__xlated("9: goto pc-6")
+__xlated("10: r0 = 0")
+__xlated("11: exit")
+SEC("struct_ops/test_prologue_goto_start")
+__naked int test_prologue_goto_start(void)
+{
+ asm volatile (
+ "if r1 == 0 goto +5;"
+ "if r1 == 1 goto +2;"
+ "r1 = 1;"
+ "goto -3;"
+ "r1 = 0;"
+ "goto -6;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+__success
+/* save __u64 *ctx to stack */
+__xlated("0: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("1: if r1 == 0x0 goto pc+5")
+__xlated("2: if r1 == 0x1 goto pc+2")
+__xlated("3: r1 = 1")
+__xlated("4: goto pc-3")
+__xlated("5: r1 = 0")
+__xlated("6: goto pc-6")
+__xlated("7: r0 = 0")
+/* epilogue */
+__xlated("8: r1 = *(u64 *)(r10 -8)")
+__xlated("9: r1 = *(u64 *)(r1 +0)")
+__xlated("10: r6 = *(u64 *)(r1 +0)")
+__xlated("11: r6 += 10000")
+__xlated("12: *(u64 *)(r1 +0) = r6")
+__xlated("13: r0 = r6")
+__xlated("14: r0 *= 2")
+__xlated("15: exit")
+SEC("struct_ops/test_epilogue_goto_start")
+__naked int test_epilogue_goto_start(void)
+{
+ asm volatile (
+ "if r1 == 0 goto +5;"
+ "if r1 == 1 goto +2;"
+ "r1 = 1;"
+ "goto -3;"
+ "r1 = 0;"
+ "goto -6;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+__success
+/* prologue */
+__xlated("0: r6 = *(u64 *)(r1 +0)")
+__xlated("1: r7 = *(u64 *)(r6 +0)")
+__xlated("2: r7 += 1000")
+__xlated("3: *(u64 *)(r6 +0) = r7")
+/* save __u64 *ctx to stack */
+__xlated("4: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("5: if r1 == 0x0 goto pc+5")
+__xlated("6: if r1 == 0x1 goto pc+2")
+__xlated("7: r1 = 1")
+__xlated("8: goto pc-3")
+__xlated("9: r1 = 0")
+__xlated("10: goto pc-6")
+__xlated("11: r0 = 0")
+/* epilogue */
+__xlated("12: r1 = *(u64 *)(r10 -8)")
+__xlated("13: r1 = *(u64 *)(r1 +0)")
+__xlated("14: r6 = *(u64 *)(r1 +0)")
+__xlated("15: r6 += 10000")
+__xlated("16: *(u64 *)(r1 +0) = r6")
+__xlated("17: r0 = r6")
+__xlated("18: r0 *= 2")
+__xlated("19: exit")
+SEC("struct_ops/test_pro_epilogue_goto_start")
+__naked int test_pro_epilogue_goto_start(void)
+{
+ asm volatile (
+ "if r1 == 0 goto +5;"
+ "if r1 == 1 goto +2;"
+ "r1 = 1;"
+ "goto -3;"
+ "r1 = 0;"
+ "goto -6;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops epilogue_goto_start = {
+ .test_prologue = (void *)test_prologue_goto_start,
+ .test_epilogue = (void *)test_epilogue_goto_start,
+ .test_pro_epilogue = (void *)test_pro_epilogue_goto_start,
+};
+
+SEC("syscall")
+__retval(0)
+int syscall_prologue_goto_start(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_prologue(&args);
+}
+
+SEC("syscall")
+__retval(20000) /* (EPILOGUE_A [10000]) * 2 */
+int syscall_epilogue_goto_start(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_epilogue(&args);
+}
+
+SEC("syscall")
+__retval(22000) /* (PROLOGUE_A [1000] + EPILOGUE_A [10000]) * 2 */
+int syscall_pro_epilogue_goto_start(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_pro_epilogue(&args);
+}
diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue_with_kfunc.c b/tools/testing/selftests/bpf/progs/pro_epilogue_with_kfunc.c
new file mode 100644
index 000000000000..a5a8f08ac8fb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pro_epilogue_with_kfunc.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+void __kfunc_btf_root(void)
+{
+ bpf_kfunc_st_ops_inc10(NULL);
+}
+
+static __noinline __used int subprog(struct st_ops_args *args)
+{
+ args->a += 1;
+ return args->a;
+}
+
+__success
+/* prologue */
+__xlated("0: r8 = r1")
+__xlated("1: r1 = 0")
+__xlated("2: call kernel-function")
+__xlated("3: if r0 != 0x0 goto pc+5")
+__xlated("4: r6 = *(u64 *)(r8 +0)")
+__xlated("5: r7 = *(u64 *)(r6 +0)")
+__xlated("6: r7 += 1000")
+__xlated("7: *(u64 *)(r6 +0) = r7")
+__xlated("8: goto pc+2")
+__xlated("9: r1 = r0")
+__xlated("10: call kernel-function")
+__xlated("11: r1 = r8")
+/* save __u64 *ctx to stack */
+__xlated("12: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("13: r1 = *(u64 *)(r1 +0)")
+__xlated("14: r6 = r1")
+__xlated("15: call kernel-function")
+__xlated("16: r1 = r6")
+__xlated("17: call pc+")
+/* epilogue */
+__xlated("18: r1 = 0")
+__xlated("19: r6 = 0")
+__xlated("20: call kernel-function")
+__xlated("21: if r0 != 0x0 goto pc+6")
+__xlated("22: r1 = *(u64 *)(r10 -8)")
+__xlated("23: r1 = *(u64 *)(r1 +0)")
+__xlated("24: r6 = *(u64 *)(r1 +0)")
+__xlated("25: r6 += 10000")
+__xlated("26: *(u64 *)(r1 +0) = r6")
+__xlated("27: goto pc+2")
+__xlated("28: r1 = r0")
+__xlated("29: call kernel-function")
+__xlated("30: r0 = r6")
+__xlated("31: r0 *= 2")
+__xlated("32: exit")
+SEC("struct_ops/test_pro_epilogue")
+__naked int test_kfunc_pro_epilogue(void)
+{
+ asm volatile (
+ "r1 = *(u64 *)(r1 +0);"
+ "r6 = r1;"
+ "call %[bpf_kfunc_st_ops_inc10];"
+ "r1 = r6;"
+ "call subprog;"
+ "exit;"
+ :
+ : __imm(bpf_kfunc_st_ops_inc10)
+ : __clobber_all);
+}
+
+SEC("syscall")
+__retval(22022) /* (PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
+int syscall_pro_epilogue(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_pro_epilogue(&args);
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops pro_epilogue_with_kfunc = {
+ .test_pro_epilogue = (void *)test_kfunc_pro_epilogue,
+};
diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h
index 00578311a423..813143b4985d 100644
--- a/tools/testing/selftests/bpf/progs/profiler.inc.h
+++ b/tools/testing/selftests/bpf/progs/profiler.inc.h
@@ -6,6 +6,10 @@
#include <bpf/bpf_tracing.h>
#include "profiler.h"
+#include "err.h"
+#include "bpf_experimental.h"
+#include "bpf_compiler.h"
+#include "bpf_misc.h"
#ifndef NULL
#define NULL 0
@@ -16,7 +20,6 @@
#define O_DIRECTORY 00200000
#define __O_TMPFILE 020000000
#define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
-#define MAX_ERRNO 4095
#define S_IFMT 00170000
#define S_IFSOCK 0140000
#define S_IFLNK 0120000
@@ -34,7 +37,6 @@
#define S_ISBLK(m) (((m)&S_IFMT) == S_IFBLK)
#define S_ISFIFO(m) (((m)&S_IFMT) == S_IFIFO)
#define S_ISSOCK(m) (((m)&S_IFMT) == S_IFSOCK)
-#define IS_ERR_VALUE(x) (unsigned long)(void*)(x) >= (unsigned long)-MAX_ERRNO
#define KILL_DATA_ARRAY_SIZE 8
@@ -132,10 +134,6 @@ struct {
__uint(max_entries, 16);
} disallowed_exec_inodes SEC(".maps");
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
-#endif
-
static INLINE bool IS_ERR(const void* ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
@@ -156,10 +154,10 @@ probe_read_lim(void* dst, void* src, unsigned long len, unsigned long max)
{
len = len < max ? len : max;
if (len > 1) {
- if (bpf_probe_read(dst, len, src))
+ if (bpf_probe_read_kernel(dst, len, src))
return 0;
} else if (len == 1) {
- if (bpf_probe_read(dst, 1, src))
+ if (bpf_probe_read_kernel(dst, 1, src))
return 0;
}
return len;
@@ -169,7 +167,7 @@ static INLINE int get_var_spid_index(struct var_kill_data_arr_t* arr_struct,
int spid)
{
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
if (arr_struct->array[i].meta.pid == spid)
@@ -185,7 +183,7 @@ static INLINE void populate_ancestors(struct task_struct* task,
ancestors_data->num_ancestors = 0;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (num_ancestors = 0; num_ancestors < MAX_ANCESTORS; num_ancestors++) {
parent = BPF_CORE_READ(parent, real_parent);
@@ -212,20 +210,20 @@ static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node,
size_t filepart_length;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < MAX_CGROUPS_PATH_DEPTH; i++) {
filepart_length =
- bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(cgroup_node, name));
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(cgroup_node, name));
if (!cgroup_node)
return payload;
if (cgroup_node == cgroup_root_node)
*root_pos = payload - payload_start;
- if (filepart_length <= MAX_PATH) {
- barrier_var(filepart_length);
+ if (bpf_cmp_likely(filepart_length, <=, MAX_PATH)) {
payload += filepart_length;
}
- cgroup_node = BPF_CORE_READ(cgroup_node, parent);
+ cgroup_node = BPF_CORE_READ(cgroup_node, __parent);
}
return payload;
}
@@ -243,7 +241,10 @@ static ino_t get_inode_from_kernfs(struct kernfs_node* node)
}
}
-int pids_cgrp_id = 1;
+extern bool CONFIG_CGROUP_PIDS __kconfig __weak;
+enum cgroup_subsys_id___local {
+ pids_cgrp_id___local = 123, /* value doesn't matter */
+};
static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
struct task_struct* task,
@@ -253,16 +254,19 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
BPF_CORE_READ(task, nsproxy, cgroup_ns, root_cset, dfl_cgrp, kn);
struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);
- if (ENABLE_CGROUP_V1_RESOLVER) {
+#if __has_builtin(__builtin_preserve_enum_value)
+ if (ENABLE_CGROUP_V1_RESOLVER && CONFIG_CGROUP_PIDS) {
+ int cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id___local,
+ pids_cgrp_id___local);
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
struct cgroup_subsys_state* subsys =
BPF_CORE_READ(task, cgroups, subsys[i]);
if (subsys != NULL) {
int subsys_id = BPF_CORE_READ(subsys, ss, id);
- if (subsys_id == pids_cgrp_id) {
+ if (subsys_id == cgrp_id) {
proc_kernfs = BPF_CORE_READ(subsys, cgroup, kn);
root_kernfs = BPF_CORE_READ(subsys, ss, root, kf_root, kn);
break;
@@ -270,6 +274,7 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
}
}
}
+#endif
cgroup_data->cgroup_root_inode = get_inode_from_kernfs(root_kernfs);
cgroup_data->cgroup_proc_inode = get_inode_from_kernfs(proc_kernfs);
@@ -296,19 +301,17 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
cgroup_data->cgroup_full_length = 0;
size_t cgroup_root_length =
- bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(root_kernfs, name));
- barrier_var(cgroup_root_length);
- if (cgroup_root_length <= MAX_PATH) {
- barrier_var(cgroup_root_length);
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(root_kernfs, name));
+ if (bpf_cmp_likely(cgroup_root_length, <=, MAX_PATH)) {
cgroup_data->cgroup_root_length = cgroup_root_length;
payload += cgroup_root_length;
}
size_t cgroup_proc_length =
- bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(proc_kernfs, name));
- barrier_var(cgroup_proc_length);
- if (cgroup_proc_length <= MAX_PATH) {
- barrier_var(cgroup_proc_length);
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(proc_kernfs, name));
+ if (bpf_cmp_likely(cgroup_proc_length, <=, MAX_PATH)) {
cgroup_data->cgroup_proc_length = cgroup_proc_length;
payload += cgroup_proc_length;
}
@@ -338,9 +341,7 @@ static INLINE void* populate_var_metadata(struct var_metadata_t* metadata,
metadata->comm_length = 0;
size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
- barrier_var(comm_length);
- if (comm_length <= TASK_COMM_LEN) {
- barrier_var(comm_length);
+ if (bpf_cmp_likely(comm_length, <=, TASK_COMM_LEN)) {
metadata->comm_length = comm_length;
payload += comm_length;
}
@@ -388,7 +389,8 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
arr_struct = bpf_map_lookup_elem(&data_heap, &zero);
if (arr_struct == NULL)
return 0;
- bpf_probe_read(&arr_struct->array[0], sizeof(arr_struct->array[0]), kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[0],
+ sizeof(arr_struct->array[0]), kill_data);
} else {
int index = get_var_spid_index(arr_struct, spid);
@@ -398,12 +400,13 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
if (kill_data == NULL)
return 0;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
if (arr_struct->array[i].meta.pid == 0) {
- bpf_probe_read(&arr_struct->array[i],
- sizeof(arr_struct->array[i]), kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[i],
+ sizeof(arr_struct->array[i]),
+ kill_data);
bpf_map_update_elem(&var_tpid_to_data, &tpid,
arr_struct, 0);
@@ -420,17 +423,17 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
if (delta_sec < STALE_INFO) {
kill_data->kill_count++;
kill_data->last_kill_time = bpf_ktime_get_ns();
- bpf_probe_read(&arr_struct->array[index],
- sizeof(arr_struct->array[index]),
- kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[index],
+ sizeof(arr_struct->array[index]),
+ kill_data);
} else {
struct var_kill_data_t* kill_data =
get_var_kill_data(ctx, spid, tpid, sig);
if (kill_data == NULL)
return 0;
- bpf_probe_read(&arr_struct->array[index],
- sizeof(arr_struct->array[index]),
- kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[index],
+ sizeof(arr_struct->array[index]),
+ kill_data);
}
}
bpf_map_update_elem(&var_tpid_to_data, &tpid, arr_struct, 0);
@@ -477,15 +480,15 @@ read_absolute_file_path_from_dentry(struct dentry* filp_dentry, void* payload)
struct dentry* parent_dentry;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < MAX_PATH_DEPTH; i++) {
- filepart_length = bpf_probe_read_str(payload, MAX_PATH,
- BPF_CORE_READ(filp_dentry, d_name.name));
- barrier_var(filepart_length);
- if (filepart_length > MAX_PATH)
+ filepart_length =
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(filp_dentry, d_name.name));
+ bpf_nop_mov(filepart_length);
+ if (bpf_cmp_unlikely(filepart_length, >, MAX_PATH))
break;
- barrier_var(filepart_length);
payload += filepart_length;
length += filepart_length;
@@ -503,7 +506,7 @@ is_ancestor_in_allowed_inodes(struct dentry* filp_dentry)
{
struct dentry* parent_dentry;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < MAX_PATH_DEPTH; i++) {
u64 dir_ino = BPF_CORE_READ(filp_dentry, d_inode, i_ino);
@@ -565,19 +568,18 @@ ssize_t BPF_KPROBE(kprobe__proc_sys_write,
sysctl_data->sysctl_val_length = 0;
sysctl_data->sysctl_path_length = 0;
- size_t sysctl_val_length = bpf_probe_read_str(payload, CTL_MAXNAME, buf);
- barrier_var(sysctl_val_length);
- if (sysctl_val_length <= CTL_MAXNAME) {
- barrier_var(sysctl_val_length);
+ size_t sysctl_val_length = bpf_probe_read_kernel_str(payload,
+ CTL_MAXNAME, buf);
+ if (bpf_cmp_likely(sysctl_val_length, <=, CTL_MAXNAME)) {
sysctl_data->sysctl_val_length = sysctl_val_length;
payload += sysctl_val_length;
}
- size_t sysctl_path_length = bpf_probe_read_str(payload, MAX_PATH,
- BPF_CORE_READ(filp, f_path.dentry, d_name.name));
- barrier_var(sysctl_path_length);
- if (sysctl_path_length <= MAX_PATH) {
- barrier_var(sysctl_path_length);
+ size_t sysctl_path_length =
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(filp, f_path.dentry,
+ d_name.name));
+ if (bpf_cmp_likely(sysctl_path_length, <=, MAX_PATH)) {
sysctl_data->sysctl_path_length = sysctl_path_length;
payload += sysctl_path_length;
}
@@ -594,7 +596,7 @@ out:
}
SEC("tracepoint/syscalls/sys_enter_kill")
-int tracepoint__syscalls__sys_enter_kill(struct trace_event_raw_sys_enter* ctx)
+int tracepoint__syscalls__sys_enter_kill(struct syscall_trace_enter* ctx)
{
struct bpf_func_stats_ctx stats_ctx;
@@ -625,13 +627,14 @@ int raw_tracepoint__sched_process_exit(void* ctx)
struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) {
struct var_kill_data_t* past_kill_data = &arr_struct->array[i];
- if (past_kill_data != NULL && past_kill_data->kill_target_pid == tpid) {
- bpf_probe_read(kill_data, sizeof(*past_kill_data), past_kill_data);
+ if (past_kill_data != NULL && past_kill_data->kill_target_pid == (pid_t)tpid) {
+ bpf_probe_read_kernel(kill_data, sizeof(*past_kill_data),
+ past_kill_data);
void* payload = kill_data->payload;
size_t offset = kill_data->payload_length;
if (offset >= MAX_METADATA_PAYLOAD_LEN + MAX_CGROUP_PAYLOAD_LEN)
@@ -642,18 +645,16 @@ int raw_tracepoint__sched_process_exit(void* ctx)
kill_data->kill_target_cgroup_proc_length = 0;
size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
- barrier_var(comm_length);
- if (comm_length <= TASK_COMM_LEN) {
- barrier_var(comm_length);
+ if (bpf_cmp_likely(comm_length, <=, TASK_COMM_LEN)) {
kill_data->kill_target_name_length = comm_length;
payload += comm_length;
}
- size_t cgroup_proc_length = bpf_probe_read_str(payload, KILL_TARGET_LEN,
- BPF_CORE_READ(proc_kernfs, name));
- barrier_var(cgroup_proc_length);
- if (cgroup_proc_length <= KILL_TARGET_LEN) {
- barrier_var(cgroup_proc_length);
+ size_t cgroup_proc_length =
+ bpf_probe_read_kernel_str(payload,
+ KILL_TARGET_LEN,
+ BPF_CORE_READ(proc_kernfs, name));
+ if (bpf_cmp_likely(cgroup_proc_length, <=, KILL_TARGET_LEN)) {
kill_data->kill_target_cgroup_proc_length = cgroup_proc_length;
payload += cgroup_proc_length;
}
@@ -711,10 +712,9 @@ int raw_tracepoint__sched_process_exec(struct bpf_raw_tracepoint_args* ctx)
proc_exec_data->parent_start_time = BPF_CORE_READ(parent_task, start_time);
const char* filename = BPF_CORE_READ(bprm, filename);
- size_t bin_path_length = bpf_probe_read_str(payload, MAX_FILENAME_LEN, filename);
- barrier_var(bin_path_length);
- if (bin_path_length <= MAX_FILENAME_LEN) {
- barrier_var(bin_path_length);
+ size_t bin_path_length =
+ bpf_probe_read_kernel_str(payload, MAX_FILENAME_LEN, filename);
+ if (bpf_cmp_likely(bin_path_length, <=, MAX_FILENAME_LEN)) {
proc_exec_data->bin_path_length = bin_path_length;
payload += bin_path_length;
}
@@ -724,8 +724,7 @@ int raw_tracepoint__sched_process_exec(struct bpf_raw_tracepoint_args* ctx)
unsigned int cmdline_length = probe_read_lim(payload, arg_start,
arg_end - arg_start, MAX_ARGS_LEN);
- if (cmdline_length <= MAX_ARGS_LEN) {
- barrier_var(cmdline_length);
+ if (bpf_cmp_likely(cmdline_length, <=, MAX_ARGS_LEN)) {
proc_exec_data->cmdline_length = cmdline_length;
payload += cmdline_length;
}
@@ -802,9 +801,7 @@ int kprobe_ret__do_filp_open(struct pt_regs* ctx)
payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
size_t len = read_absolute_file_path_from_dentry(filp_dentry, payload);
- barrier_var(len);
- if (len <= MAX_FILEPATH_LENGTH) {
- barrier_var(len);
+ if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
payload += len;
filemod_data->dst_filepath_length = len;
}
@@ -819,8 +816,9 @@ out:
SEC("kprobe/vfs_link")
int BPF_KPROBE(kprobe__vfs_link,
- struct dentry* old_dentry, struct inode* dir,
- struct dentry* new_dentry, struct inode** delegated_inode)
+ struct dentry* old_dentry, struct mnt_idmap *idmap,
+ struct inode* dir, struct dentry* new_dentry,
+ struct inode** delegated_inode)
{
struct bpf_func_stats_ctx stats_ctx;
bpf_stats_enter(&stats_ctx, profiler_bpf_vfs_link);
@@ -856,17 +854,13 @@ int BPF_KPROBE(kprobe__vfs_link,
payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
size_t len = read_absolute_file_path_from_dentry(old_dentry, payload);
- barrier_var(len);
- if (len <= MAX_FILEPATH_LENGTH) {
- barrier_var(len);
+ if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
payload += len;
filemod_data->src_filepath_length = len;
}
len = read_absolute_file_path_from_dentry(new_dentry, payload);
- barrier_var(len);
- if (len <= MAX_FILEPATH_LENGTH) {
- barrier_var(len);
+ if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
payload += len;
filemod_data->dst_filepath_length = len;
}
@@ -914,17 +908,14 @@ int BPF_KPROBE(kprobe__vfs_symlink, struct inode* dir, struct dentry* dentry,
filemod_data->payload);
payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
- size_t len = bpf_probe_read_str(payload, MAX_FILEPATH_LENGTH, oldname);
- barrier_var(len);
- if (len <= MAX_FILEPATH_LENGTH) {
- barrier_var(len);
+ size_t len = bpf_probe_read_kernel_str(payload, MAX_FILEPATH_LENGTH,
+ oldname);
+ if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
payload += len;
filemod_data->src_filepath_length = len;
}
len = read_absolute_file_path_from_dentry(dentry, payload);
- barrier_var(len);
- if (len <= MAX_FILEPATH_LENGTH) {
- barrier_var(len);
+ if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
payload += len;
filemod_data->dst_filepath_length = len;
}
diff --git a/tools/testing/selftests/bpf/progs/profiler1.c b/tools/testing/selftests/bpf/progs/profiler1.c
index 4df9088bfc00..fb6b13522949 100644
--- a/tools/testing/selftests/bpf/progs/profiler1.c
+++ b/tools/testing/selftests/bpf/progs/profiler1.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))
#define UNROLL
#define INLINE __always_inline
#include "profiler.inc.h"
diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h
index 2fb7adafb6b6..86484f07e1d1 100644
--- a/tools/testing/selftests/bpf/progs/pyperf.h
+++ b/tools/testing/selftests/bpf/progs/pyperf.h
@@ -7,6 +7,8 @@
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_compiler.h"
#define FUNCTION_NAME_LEN 64
#define FILE_NAME_LEN 128
@@ -159,6 +161,57 @@ struct {
__uint(value_size, sizeof(long long) * 127);
} stackmap SEC(".maps");
+#ifdef USE_BPF_LOOP
+struct process_frame_ctx {
+ int cur_cpu;
+ int32_t *symbol_counter;
+ void *frame_ptr;
+ FrameData *frame;
+ PidData *pidData;
+ Symbol *sym;
+ Event *event;
+ bool done;
+};
+
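+/* bpf_loop() callback used when USE_BPF_LOOP is defined: it processes
+ * one python stack frame per invocation; returning 1 stops the loop early.
+ */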
+static int process_frame_callback(__u32 i, struct process_frame_ctx *ctx)
+{
+ int zero = 0;
+ void *frame_ptr = ctx->frame_ptr;
+ PidData *pidData = ctx->pidData;
+ FrameData *frame = ctx->frame;
+ int32_t *symbol_counter = ctx->symbol_counter;
+ int cur_cpu = ctx->cur_cpu;
+ Event *event = ctx->event;
+ Symbol *sym = ctx->sym;
+
+ if (frame_ptr && get_frame_data(frame_ptr, pidData, frame, sym)) {
+ int32_t new_symbol_id = *symbol_counter * 64 + cur_cpu;
+ int32_t *symbol_id = bpf_map_lookup_elem(&symbolmap, sym);
+
+ if (!symbol_id) {
+ bpf_map_update_elem(&symbolmap, sym, &zero, 0);
+ symbol_id = bpf_map_lookup_elem(&symbolmap, sym);
+ if (!symbol_id) {
+ ctx->done = true;
+ return 1;
+ }
+ }
+ if (*symbol_id == new_symbol_id)
+ (*symbol_counter)++;
+
+ barrier_var(i);
+ if (i >= STACK_MAX_LEN)
+ return 1;
+
+ event->stack[i] = *symbol_id;
+
+ event->stack_len = i + 1;
+ frame_ptr = frame->f_back;
+ }
+ return 0;
+}
+#endif /* USE_BPF_LOOP */
+
#ifdef GLOBAL_FUNC
__noinline
#elif defined(SUBPROGS)
@@ -228,13 +281,37 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx)
int32_t* symbol_counter = bpf_map_lookup_elem(&symbolmap, &sym);
if (symbol_counter == NULL)
return 0;
-#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+#ifdef USE_BPF_LOOP
+ struct process_frame_ctx ctx = {
+ .cur_cpu = cur_cpu,
+ .symbol_counter = symbol_counter,
+ .frame_ptr = frame_ptr,
+ .frame = &frame,
+ .pidData = pidData,
+ .sym = &sym,
+ .event = event,
+ };
+
+ bpf_loop(STACK_MAX_LEN, process_frame_callback, &ctx, 0);
+ if (ctx.done)
+ return 0;
#else
-#pragma clang loop unroll(full)
-#endif
+#if defined(USE_ITER)
+/* no for loop, no unrolling */
+#elif defined(NO_UNROLL)
+ __pragma_loop_no_unroll
+#elif defined(UNROLL_COUNT)
+ __pragma_loop_unroll_count(UNROLL_COUNT)
+#else
+ __pragma_loop_unroll_full
+#endif /* NO_UNROLL */
/* Unwind python stack */
+#ifdef USE_ITER
+ int i;
+ bpf_for(i, 0, STACK_MAX_LEN) {
+#else /* !USE_ITER */
for (int i = 0; i < STACK_MAX_LEN; ++i) {
+#endif
if (frame_ptr && get_frame_data(frame_ptr, pidData, &frame, &sym)) {
int32_t new_symbol_id = *symbol_counter * 64 + cur_cpu;
int32_t *symbol_id = bpf_map_lookup_elem(&symbolmap, &sym);
@@ -251,6 +328,7 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx)
frame_ptr = frame.f_back;
}
}
+#endif /* USE_BPF_LOOP */
event->stack_complete = frame_ptr == NULL;
} else {
event->stack_complete = 1;
@@ -268,7 +346,7 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx)
SEC("raw_tracepoint/kfree_skb")
int on_event(struct bpf_raw_tracepoint_args* ctx)
{
- int i, ret = 0;
+ int ret = 0;
ret |= __on_event(ctx);
ret |= __on_event(ctx);
ret |= __on_event(ctx);
diff --git a/tools/testing/selftests/bpf/progs/pyperf180.c b/tools/testing/selftests/bpf/progs/pyperf180.c
index c39f559d3100..42c4a8b62e36 100644
--- a/tools/testing/selftests/bpf/progs/pyperf180.c
+++ b/tools/testing/selftests/bpf/progs/pyperf180.c
@@ -1,4 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 180
+
+/* An llvm upstream commit in clang18,
+ * https://github.com/llvm/llvm-project/commit/1a2e77cf9e11dbf56b5720c607313a566eebb16e,
+ * changed inlining behavior and caused a compilation failure: some branch
+ * target distances exceeded the 16-bit representation, which is the maximum
+ * for cpu v1/v2/v3. The macro __BPF_CPU_VERSION__ was later added to clang18
+ * to report which cpu version is used for compilation, so a smaller
+ * unroll_count can be set when __BPF_CPU_VERSION__ is less than 4, which
+ * reduces the branch target distances and resolves the compilation failure.
+ *
+ * To cover the case where a developer/CI uses clang18 but the corresponding
+ * compiler checkout does not yet have __BPF_CPU_VERSION__, the smaller
+ * unroll_count is set as well to prevent potential compilation failures.
+ */
+#ifdef __BPF_CPU_VERSION__
+#if __BPF_CPU_VERSION__ < 4
+#define UNROLL_COUNT 90
+#endif
+#elif __clang_major__ == 18
+#define UNROLL_COUNT 90
+#endif
+
#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/pyperf600.c b/tools/testing/selftests/bpf/progs/pyperf600.c
index cb49b89e37cd..ce1aa5189cc4 100644
--- a/tools/testing/selftests/bpf/progs/pyperf600.c
+++ b/tools/testing/selftests/bpf/progs/pyperf600.c
@@ -1,9 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 600
-/* clang will not unroll the loop 600 times.
- * Instead it will unroll it to the amount it deemed
- * appropriate, but the loop will still execute 600 times.
- * Total program size is around 90k insns
+/* Fully unrolling the 600 iterations would produce a
+ * program close to 298k insns, which can push BPF_JMP
+ * offsets out of 16-bit integer range.
+ * So limit the unroll count to 150: the total program
+ * size stays around 80k insns while the loop still
+ * executes 600 times.
+ */
+#define UNROLL_COUNT 150
#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c b/tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c
new file mode 100644
index 000000000000..5c2059dc01af
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#define STACK_MAX_LEN 600
+#define USE_BPF_LOOP
+#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/pyperf600_iter.c b/tools/testing/selftests/bpf/progs/pyperf600_iter.c
new file mode 100644
index 000000000000..d62e1b200c30
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pyperf600_iter.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+#define STACK_MAX_LEN 600
+#define SUBPROGS
+#define NO_UNROLL
+#define USE_ITER
+#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c b/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c
index 6beff7502f4d..520b58c4f8db 100644
--- a/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c
+++ b/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c
@@ -2,7 +2,4 @@
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 600
#define NO_UNROLL
-/* clang will not unroll at all.
- * Total program size is around 2k insns
- */
#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/raw_tp_null.c b/tools/testing/selftests/bpf/progs/raw_tp_null.c
new file mode 100644
index 000000000000..efa416f53968
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/raw_tp_null.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+int tid;
+int i;
+
+SEC("tp_btf/bpf_testmod_test_raw_tp_null_tp")
+int BPF_PROG(test_raw_tp_null, struct sk_buff *skb)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+
+ if (task->pid != tid)
+ return 0;
+
+	/* If dead code elimination kicks in, the increment +=2 will be
+	 * removed. For raw_tp programs attaching to tracepoints in kernel
+	 * modules, we mark input arguments as PTR_MAYBE_NULL, so the
+	 * verifier must never assume the argument is non-NULL and prune
+	 * either side of the branch.
+	 */
+ asm volatile ("%[i] += 1; if %[ctx] != 0 goto +1; %[i] += 2;"
+ : [i]"+r"(i)
+ : [ctx]"r"(skb)
+ : "memory");
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/raw_tp_null_fail.c b/tools/testing/selftests/bpf/progs/raw_tp_null_fail.c
new file mode 100644
index 000000000000..0d58114a4955
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/raw_tp_null_fail.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Ensure module parameter has PTR_MAYBE_NULL */
+SEC("tp_btf/bpf_testmod_test_raw_tp_null_tp")
+__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'")
+int test_raw_tp_null_bpf_testmod_test_raw_tp_null_arg_1(void *ctx) {
+ asm volatile("r1 = *(u64 *)(r1 +0); r1 = *(u64 *)(r1 +0);" ::: __clobber_all);
+ return 0;
+}
+
+/* Check NULL marking */
+SEC("tp_btf/sched_pi_setprio")
+__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'")
+int test_raw_tp_null_sched_pi_setprio_arg_2(void *ctx) {
+ asm volatile("r1 = *(u64 *)(r1 +8); r1 = *(u64 *)(r1 +0);" ::: __clobber_all);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c
new file mode 100644
index 000000000000..49fe93d7e059
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+struct node_data {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+};
+
+struct root_nested_inner {
+ struct bpf_spin_lock glock;
+ struct bpf_rb_root root __contains(node_data, node);
+};
+
+struct root_nested {
+ struct root_nested_inner inner;
+};
+
+long less_callback_ran = -1;
+long removed_key = -1;
+long first_data[2] = {-1, -1};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+private(A) struct bpf_rb_root groot_array[2] __contains(node_data, node);
+private(A) struct bpf_rb_root groot_array_one[1] __contains(node_data, node);
+private(B) struct root_nested groot_nested;
+
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+ less_callback_ran = 1;
+
+ return node_a->key < node_b->key;
+}
+
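+/* Insert keys 5 and 1 under one lock hold and key 3 under a second;
+ * less() keeps the tree ordered by ascending key.
+ */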
+static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock)
+{
+ struct node_data *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+ n->key = 5;
+
+ m = bpf_obj_new(typeof(*m));
+ if (!m) {
+ bpf_obj_drop(n);
+ return 2;
+ }
+ m->key = 1;
+
+ bpf_spin_lock(lock);
+ bpf_rbtree_add(root, &n->node, less);
+ bpf_rbtree_add(root, &m->node, less);
+ bpf_spin_unlock(lock);
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 3;
+ n->key = 3;
+
+ bpf_spin_lock(lock);
+ bpf_rbtree_add(root, &n->node, less);
+ bpf_spin_unlock(lock);
+ return 0;
+}
+
+SEC("tc")
+long rbtree_add_nodes(void *ctx)
+{
+ return __add_three(&groot, &glock);
+}
+
+SEC("tc")
+long rbtree_add_nodes_nested(void *ctx)
+{
+ return __add_three(&groot_nested.inner.root, &groot_nested.inner.glock);
+}
+
+SEC("tc")
+long rbtree_add_and_remove(void *ctx)
+{
+ struct bpf_rb_node *res = NULL;
+ struct node_data *n, *m = NULL;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ goto err_out;
+ n->key = 5;
+
+ m = bpf_obj_new(typeof(*m));
+ if (!m)
+ goto err_out;
+ m->key = 3;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_rbtree_add(&groot, &m->node, less);
+ res = bpf_rbtree_remove(&groot, &n->node);
+ bpf_spin_unlock(&glock);
+
+ if (!res)
+ return 1;
+
+ n = container_of(res, struct node_data, node);
+ removed_key = n->key;
+ bpf_obj_drop(n);
+
+ return 0;
+err_out:
+ if (n)
+ bpf_obj_drop(n);
+ if (m)
+ bpf_obj_drop(m);
+ return 1;
+}
+
+SEC("tc")
+long rbtree_add_and_remove_array(void *ctx)
+{
+ struct bpf_rb_node *res1 = NULL, *res2 = NULL, *res3 = NULL;
+ struct node_data *nodes[3][2] = {{NULL, NULL}, {NULL, NULL}, {NULL, NULL}};
+ struct node_data *n;
+ long k1 = -1, k2 = -1, k3 = -1;
+ int i, j;
+
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < 2; j++) {
+ nodes[i][j] = bpf_obj_new(typeof(*nodes[i][j]));
+ if (!nodes[i][j])
+ goto err_out;
+ nodes[i][j]->key = i * 2 + j;
+ }
+ }
+
+ bpf_spin_lock(&glock);
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 2; j++)
+ bpf_rbtree_add(&groot_array[i], &nodes[i][j]->node, less);
+ for (j = 0; j < 2; j++)
+ bpf_rbtree_add(&groot_array_one[0], &nodes[2][j]->node, less);
+ res1 = bpf_rbtree_remove(&groot_array[0], &nodes[0][0]->node);
+ res2 = bpf_rbtree_remove(&groot_array[1], &nodes[1][0]->node);
+ res3 = bpf_rbtree_remove(&groot_array_one[0], &nodes[2][0]->node);
+ bpf_spin_unlock(&glock);
+
+ if (res1) {
+ n = container_of(res1, struct node_data, node);
+ k1 = n->key;
+ bpf_obj_drop(n);
+ }
+ if (res2) {
+ n = container_of(res2, struct node_data, node);
+ k2 = n->key;
+ bpf_obj_drop(n);
+ }
+ if (res3) {
+ n = container_of(res3, struct node_data, node);
+ k3 = n->key;
+ bpf_obj_drop(n);
+ }
+ if (k1 != 0 || k2 != 2 || k3 != 4)
+ return 2;
+
+ return 0;
+
+err_out:
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < 2; j++) {
+ if (nodes[i][j])
+ bpf_obj_drop(nodes[i][j]);
+ }
+ }
+ return 1;
+}
+
+SEC("tc")
+long rbtree_first_and_remove(void *ctx)
+{
+ struct bpf_rb_node *res = NULL;
+ struct node_data *n, *m, *o;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+ n->key = 3;
+ n->data = 4;
+
+ m = bpf_obj_new(typeof(*m));
+ if (!m)
+ goto err_out;
+ m->key = 5;
+ m->data = 6;
+
+ o = bpf_obj_new(typeof(*o));
+ if (!o)
+ goto err_out;
+ o->key = 1;
+ o->data = 2;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_rbtree_add(&groot, &m->node, less);
+ bpf_rbtree_add(&groot, &o->node, less);
+
+ res = bpf_rbtree_first(&groot);
+ if (!res) {
+ bpf_spin_unlock(&glock);
+ return 2;
+ }
+
+ o = container_of(res, struct node_data, node);
+ first_data[0] = o->data;
+
+ res = bpf_rbtree_remove(&groot, &o->node);
+ bpf_spin_unlock(&glock);
+
+ if (!res)
+ return 5;
+
+ o = container_of(res, struct node_data, node);
+ removed_key = o->key;
+ bpf_obj_drop(o);
+
+ bpf_spin_lock(&glock);
+ res = bpf_rbtree_first(&groot);
+ if (!res) {
+ bpf_spin_unlock(&glock);
+ return 3;
+ }
+
+ o = container_of(res, struct node_data, node);
+ first_data[1] = o->data;
+ bpf_spin_unlock(&glock);
+
+ return 0;
+err_out:
+ if (n)
+ bpf_obj_drop(n);
+ if (m)
+ bpf_obj_drop(m);
+ return 1;
+}
+
+SEC("tc")
+long rbtree_api_release_aliasing(void *ctx)
+{
+ struct node_data *n, *m, *o;
+ struct bpf_rb_node *res, *res2;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+ n->key = 41;
+ n->data = 42;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_spin_unlock(&glock);
+
+ bpf_spin_lock(&glock);
+
+	/* m and o point to the same node,
+	 * but the verifier doesn't know this
+	 */
+ res = bpf_rbtree_first(&groot);
+ if (!res)
+ goto err_out;
+ o = container_of(res, struct node_data, node);
+
+ res = bpf_rbtree_first(&groot);
+ if (!res)
+ goto err_out;
+ m = container_of(res, struct node_data, node);
+
+ res = bpf_rbtree_remove(&groot, &m->node);
+	/* The remove above returns an owning reference to m, which is the
+	 * same node that the non-owning ref o points at. We can safely try
+	 * to remove o as well: the second rbtree_remove will return NULL
+	 * since the node is no longer in a tree.
+	 *
+	 * Previously we relied on the verifier type system plus rbtree_remove
+	 * invalidating non-owning refs to ensure that rbtree_remove couldn't
+	 * fail, but rbtree_remove now does runtime checking, so non-owning
+	 * refs are no longer invalidated after a remove.
+	 */
+ res2 = bpf_rbtree_remove(&groot, &o->node);
+
+ bpf_spin_unlock(&glock);
+
+ if (res) {
+ o = container_of(res, struct node_data, node);
+ first_data[0] = o->data;
+ bpf_obj_drop(o);
+ }
+ if (res2) {
+ /* The second remove fails, so res2 is null and this doesn't
+ * execute
+ */
+ m = container_of(res2, struct node_data, node);
+ first_data[1] = m->data;
+ bpf_obj_drop(m);
+ }
+ return 0;
+
+err_out:
+ bpf_spin_unlock(&glock);
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
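For reference, a minimal userspace sketch (not part of this patch) of how the SEC("tc") programs above could be exercised via BPF_PROG_TEST_RUN; the object name "rbtree.bpf.o", the dummy packet buffer, and skipping the generated skeleton are illustrative assumptions rather than the actual test_progs wiring:

#include <stdio.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	char pkt[64] = {};	/* dummy L2 frame; tc test_run needs packet data */
	struct bpf_program *prog;
	struct bpf_object *obj;
	int prog_fd, err;

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt,
		.data_size_in = sizeof(pkt),
		.repeat = 1,
	);

	obj = bpf_object__open_file("rbtree.bpf.o", NULL);	/* assumed object name */
	if (!obj)
		return 1;

	err = bpf_object__load(obj);
	if (err)
		goto out;

	prog = bpf_object__find_program_by_name(obj, "rbtree_add_nodes");
	if (!prog) {
		err = -1;
		goto out;
	}
	prog_fd = bpf_program__fd(prog);

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	printf("err=%d prog retval=%d\n", err, topts.retval);
out:
	bpf_object__close(obj);
	return err ? 1 : 0;
}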
diff --git a/tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c
new file mode 100644
index 000000000000..60079b202c07
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+struct node_data {
+ int key;
+ int data;
+ struct bpf_rb_node node;
+};
+
+struct node_data2 {
+ int key;
+ struct bpf_rb_node node;
+ int data;
+};
+
+static bool less2(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data2 *node_a;
+ struct node_data2 *node_b;
+
+ node_a = container_of(a, struct node_data2, node);
+ node_b = container_of(b, struct node_data2, node);
+
+ return node_a->key < node_b->key;
+}
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+
+SEC("tc")
+long rbtree_api_add__add_wrong_type(void *ctx)
+{
+ struct node_data2 *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less2);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c
new file mode 100644
index 000000000000..7651843f5a80
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+/* BTF load should fail: the bpf_rb_root's __contains annotation points to
+ * 'node', but 'node' is not a bpf_rb_node
+ */
+struct node_data {
+ int key;
+ int data;
+ struct bpf_list_node node;
+};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+
+SEC("tc")
+long rbtree_api_add__wrong_node_type(void *ctx)
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_first(&groot);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c
new file mode 100644
index 000000000000..4acb6af2dfe3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+struct node_data {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+private(A) struct bpf_rb_root groot2 __contains(node_data, node);
+
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+
+ return node_a->key < node_b->key;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
+long rbtree_api_nolock_add(void *ctx)
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_rbtree_add(&groot, &n->node, less);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
+long rbtree_api_nolock_remove(void *ctx)
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_spin_unlock(&glock);
+
+ bpf_rbtree_remove(&groot, &n->node);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
+long rbtree_api_nolock_first(void *ctx)
+{
+ bpf_rbtree_first(&groot);
+ return 0;
+}
+
+SEC("?tc")
+__retval(0)
+long rbtree_api_remove_unadded_node(void *ctx)
+{
+ struct node_data *n, *m;
+ struct bpf_rb_node *res_n, *res_m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ m = bpf_obj_new(typeof(*m));
+ if (!m) {
+ bpf_obj_drop(n);
+ return 1;
+ }
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+
+ res_n = bpf_rbtree_remove(&groot, &n->node);
+
+ res_m = bpf_rbtree_remove(&groot, &m->node);
+ bpf_spin_unlock(&glock);
+
+ bpf_obj_drop(m);
+ if (res_n)
+ bpf_obj_drop(container_of(res_n, struct node_data, node));
+ if (res_m) {
+ bpf_obj_drop(container_of(res_m, struct node_data, node));
+ /* m was not added to the rbtree */
+ return 2;
+ }
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference id=3 alloc_insn={{[0-9]+}}")
+long rbtree_api_remove_no_drop(void *ctx)
+{
+ struct bpf_rb_node *res;
+ struct node_data *n;
+
+ bpf_spin_lock(&glock);
+ res = bpf_rbtree_first(&groot);
+ if (!res)
+ goto unlock_err;
+
+ res = bpf_rbtree_remove(&groot, res);
+
+ if (res) {
+ n = container_of(res, struct node_data, node);
+ __sink(n);
+ }
+ bpf_spin_unlock(&glock);
+
+ /* if (res) { bpf_obj_drop(n); } is missing here */
+ return 0;
+
+unlock_err:
+ bpf_spin_unlock(&glock);
+ return 1;
+}
+
+SEC("?tc")
+__failure __msg("arg#1 expected pointer to allocated object")
+long rbtree_api_add_to_multiple_trees(void *ctx)
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+
+	/* This add should fail since n is already in groot's tree */
+ bpf_rbtree_add(&groot2, &n->node, less);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("dereference of modified ptr_or_null_ ptr R2 off=16 disallowed")
+long rbtree_api_use_unchecked_remove_retval(void *ctx)
+{
+ struct bpf_rb_node *res;
+
+ bpf_spin_lock(&glock);
+
+ res = bpf_rbtree_first(&groot);
+ if (!res)
+ goto err_out;
+ res = bpf_rbtree_remove(&groot, res);
+
+ bpf_spin_unlock(&glock);
+
+ bpf_spin_lock(&glock);
+ /* Must check res for NULL before using in rbtree_add below */
+ bpf_rbtree_add(&groot, res, less);
+ bpf_spin_unlock(&glock);
+ return 0;
+
+err_out:
+ bpf_spin_unlock(&glock);
+ return 1;
+}
+
+SEC("?tc")
+__failure __msg("bpf_rbtree_remove can only take non-owning or refcounted bpf_rb_node pointer")
+long rbtree_api_add_release_unlock_escape(void *ctx)
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_spin_unlock(&glock);
+
+ bpf_spin_lock(&glock);
+	/* After the add() in the previous critical section, n should be
+	 * release_on_unlock and released by the previous spin_unlock,
+	 * so it should not be possible to use it here
+	 */
+ bpf_rbtree_remove(&groot, &n->node);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("bpf_rbtree_remove can only take non-owning or refcounted bpf_rb_node pointer")
+long rbtree_api_first_release_unlock_escape(void *ctx)
+{
+ struct bpf_rb_node *res;
+ struct node_data *n;
+
+ bpf_spin_lock(&glock);
+ res = bpf_rbtree_first(&groot);
+ if (!res) {
+ bpf_spin_unlock(&glock);
+ return 1;
+ }
+ n = container_of(res, struct node_data, node);
+ bpf_spin_unlock(&glock);
+
+ bpf_spin_lock(&glock);
+	/* After the first() in the previous critical section, n should be
+	 * release_on_unlock and released by the previous spin_unlock,
+	 * so it should not be possible to use it here
+	 */
+ bpf_rbtree_remove(&groot, &n->node);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+static bool less__bad_fn_call_add(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+ bpf_rbtree_add(&groot, &node_a->node, less);
+
+ return node_a->key < node_b->key;
+}
+
+static bool less__bad_fn_call_remove(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+ bpf_rbtree_remove(&groot, &node_a->node);
+
+ return node_a->key < node_b->key;
+}
+
+static bool less__bad_fn_call_first_unlock_after(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+ bpf_rbtree_first(&groot);
+ bpf_spin_unlock(&glock);
+
+ return node_a->key < node_b->key;
+}
+
+static __always_inline
+long add_with_cb(bool (cb)(struct bpf_rb_node *a, const struct bpf_rb_node *b))
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, cb);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("arg#1 expected pointer to allocated object")
+long rbtree_api_add_bad_cb_bad_fn_call_add(void *ctx)
+{
+ return add_with_cb(less__bad_fn_call_add);
+}
+
+SEC("?tc")
+__failure __msg("rbtree_remove not allowed in rbtree cb")
+long rbtree_api_add_bad_cb_bad_fn_call_remove(void *ctx)
+{
+ return add_with_cb(less__bad_fn_call_remove);
+}
+
+SEC("?tc")
+__failure __msg("can't spin_{lock,unlock} in rbtree cb")
+long rbtree_api_add_bad_cb_bad_fn_call_first_unlock_after(void *ctx)
+{
+ return add_with_cb(less__bad_fn_call_first_unlock_after);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rbtree_search.c b/tools/testing/selftests/bpf/progs/rbtree_search.c
new file mode 100644
index 000000000000..098ef970fac1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree_search.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct node_data {
+ struct bpf_refcount ref;
+ struct bpf_rb_node r0;
+ struct bpf_rb_node r1;
+ int key0;
+ int key1;
+};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock0;
+private(A) struct bpf_rb_root groot0 __contains(node_data, r0);
+
+private(B) struct bpf_spin_lock glock1;
+private(B) struct bpf_rb_root groot1 __contains(node_data, r1);
+
+#define rb_entry(ptr, type, member) container_of(ptr, type, member)
+#define NR_NODES 16
+
+int zero = 0;
+
+static bool less0(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = rb_entry(a, struct node_data, r0);
+ node_b = rb_entry(b, struct node_data, r0);
+
+ return node_a->key0 < node_b->key0;
+}
+
+static bool less1(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = rb_entry(a, struct node_data, r1);
+ node_b = rb_entry(b, struct node_data, r1);
+
+ return node_a->key1 < node_b->key1;
+}
+
+SEC("syscall")
+__retval(0)
+long rbtree_search(void *ctx)
+{
+ struct bpf_rb_node *rb_n, *rb_m, *gc_ns[NR_NODES];
+ long lookup_key = NR_NODES / 2;
+ struct node_data *n, *m;
+ int i, nr_gc = 0;
+
+ for (i = zero; i < NR_NODES && can_loop; i++) {
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return __LINE__;
+
+ m = bpf_refcount_acquire(n);
+
+ n->key0 = i;
+ m->key1 = i;
+
+ bpf_spin_lock(&glock0);
+ bpf_rbtree_add(&groot0, &n->r0, less0);
+ bpf_spin_unlock(&glock0);
+
+ bpf_spin_lock(&glock1);
+ bpf_rbtree_add(&groot1, &m->r1, less1);
+ bpf_spin_unlock(&glock1);
+ }
+
+ n = NULL;
+ bpf_spin_lock(&glock0);
+ rb_n = bpf_rbtree_root(&groot0);
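+	/* Descend from the root toward lookup_key, remembering every node
+	 * visited on the way (except the match) so it can be removed and
+	 * freed afterwards.
+	 */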
+ while (can_loop) {
+ if (!rb_n) {
+ bpf_spin_unlock(&glock0);
+ return __LINE__;
+ }
+
+ n = rb_entry(rb_n, struct node_data, r0);
+ if (lookup_key == n->key0)
+ break;
+ if (nr_gc < NR_NODES)
+ gc_ns[nr_gc++] = rb_n;
+ if (lookup_key < n->key0)
+ rb_n = bpf_rbtree_left(&groot0, rb_n);
+ else
+ rb_n = bpf_rbtree_right(&groot0, rb_n);
+ }
+
+ if (!n || lookup_key != n->key0) {
+ bpf_spin_unlock(&glock0);
+ return __LINE__;
+ }
+
+ for (i = 0; i < nr_gc; i++) {
+ rb_n = gc_ns[i];
+ gc_ns[i] = bpf_rbtree_remove(&groot0, rb_n);
+ }
+
+ m = bpf_refcount_acquire(n);
+ bpf_spin_unlock(&glock0);
+
+ for (i = 0; i < nr_gc; i++) {
+ rb_n = gc_ns[i];
+ if (rb_n) {
+ n = rb_entry(rb_n, struct node_data, r0);
+ bpf_obj_drop(n);
+ }
+ }
+
+ if (!m)
+ return __LINE__;
+
+ bpf_spin_lock(&glock1);
+ rb_m = bpf_rbtree_remove(&groot1, &m->r1);
+ bpf_spin_unlock(&glock1);
+ bpf_obj_drop(m);
+ if (!rb_m)
+ return __LINE__;
+ bpf_obj_drop(rb_entry(rb_m, struct node_data, r1));
+
+ return 0;
+}
+
+#define TEST_ROOT(dolock) \
+SEC("syscall") \
+__failure __msg(MSG) \
+long test_root_spinlock_##dolock(void *ctx) \
+{ \
+ struct bpf_rb_node *rb_n; \
+ __u64 jiffies = 0; \
+ \
+ if (dolock) \
+ bpf_spin_lock(&glock0); \
+ rb_n = bpf_rbtree_root(&groot0); \
+ if (rb_n) \
+ jiffies = bpf_jiffies64(); \
+ if (dolock) \
+ bpf_spin_unlock(&glock0); \
+ \
+ return !!jiffies; \
+}
+
+#define TEST_LR(op, dolock) \
+SEC("syscall") \
+__failure __msg(MSG) \
+long test_##op##_spinlock_##dolock(void *ctx) \
+{ \
+ struct bpf_rb_node *rb_n; \
+ struct node_data *n; \
+ __u64 jiffies = 0; \
+ \
+ bpf_spin_lock(&glock0); \
+ rb_n = bpf_rbtree_root(&groot0); \
+ if (!rb_n) { \
+ bpf_spin_unlock(&glock0); \
+ return 1; \
+ } \
+ n = rb_entry(rb_n, struct node_data, r0); \
+ n = bpf_refcount_acquire(n); \
+ bpf_spin_unlock(&glock0); \
+ if (!n) \
+ return 1; \
+ \
+ if (dolock) \
+ bpf_spin_lock(&glock0); \
+ rb_n = bpf_rbtree_##op(&groot0, &n->r0); \
+ if (rb_n) \
+ jiffies = bpf_jiffies64(); \
+ if (dolock) \
+ bpf_spin_unlock(&glock0); \
+ \
+ return !!jiffies; \
+}
+
+/*
+ * Use a separate MSG macro instead of passing MSG to TEST_XXX(..., MSG)
+ * to ensure the message itself does not end up in the bpf prog line info,
+ * which the verifier includes in its log.
+ * Otherwise, the test_loader would incorrectly match the prog line info
+ * instead of the log generated by the verifier.
+ */
+#define MSG "call bpf_rbtree_root{{.+}}; R0{{(_w)?}}=rcu_ptr_or_null_node_data(id={{[0-9]+}},non_own_ref"
+TEST_ROOT(true)
+#undef MSG
+#define MSG "call bpf_rbtree_{{(left|right).+}}; R0{{(_w)?}}=rcu_ptr_or_null_node_data(id={{[0-9]+}},non_own_ref"
+TEST_LR(left, true)
+TEST_LR(right, true)
+#undef MSG
+
+#define MSG "bpf_spin_lock at off=0 must be held for bpf_rb_root"
+TEST_ROOT(false)
+TEST_LR(left, false)
+TEST_LR(right, false)
+#undef MSG
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c
new file mode 100644
index 000000000000..3a868a199349
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_tracing_net.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+__u32 user_data, target_pid;
+__s32 key_serial;
+__u64 flags, task_storage_val, cgroup_id;
+
+struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym;
+void bpf_key_put(struct bpf_key *key) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int get_cgroup_id(void *ctx)
+{
+ struct task_struct *task;
+ struct css_set *cgroups;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ /* simulate bpf_get_current_cgroup_id() helper */
+ bpf_rcu_read_lock();
+ cgroups = task->cgroups;
+ if (!cgroups)
+ goto unlock;
+ cgroup_id = cgroups->dfl_cgrp->kn->id;
+unlock:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int task_succ(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+ long init_val = 2;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ bpf_rcu_read_lock();
+	/* RCU-protected region with a helper call using the rcu ptr real_parent */
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ ptr = bpf_task_storage_get(&map_a, real_parent, &init_val,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ goto out;
+ ptr = bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ if (!ptr)
+ goto out;
+ task_storage_val = *ptr;
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int no_lock(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ /* old style ptr_to_btf_id is not allowed in sleepable */
+ task = bpf_get_current_task_btf();
+ real_parent = task->real_parent;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int two_regions(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ /* two regions */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int non_sleepable_1(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int non_sleepable_2(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ bpf_rcu_read_lock();
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_unlock();
+
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int task_acquire(void *ctx)
+{
+ struct task_struct *task, *real_parent, *gparent;
+
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+
+ /* rcu_ptr->rcu_field */
+ gparent = real_parent->real_parent;
+ if (!gparent)
+ goto out;
+
+ /* acquire a reference which can be used outside rcu read lock region */
+ gparent = bpf_task_acquire(gparent);
+ if (!gparent)
+ goto out;
+
+ (void)bpf_task_storage_get(&map_a, gparent, 0, 0);
+ bpf_task_release(gparent);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int miss_lock(void *ctx)
+{
+ struct task_struct *task;
+
+ /* missing bpf_rcu_read_lock() */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ (void)bpf_task_storage_get(&map_a, task, 0, 0);
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int miss_unlock(void *ctx)
+{
+ struct task_struct *task;
+
+ /* missing bpf_rcu_read_unlock() */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ (void)bpf_task_storage_get(&map_a, task, 0, 0);
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int non_sleepable_rcu_mismatch(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ task = bpf_get_current_task_btf();
+ /* non-sleepable: missing bpf_rcu_read_unlock() in one path */
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ if (real_parent)
+ bpf_rcu_read_unlock();
+out:
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int inproper_sleepable_helper(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+ struct pt_regs *regs;
+ __u32 value = 0;
+ void *ptr;
+
+ task = bpf_get_current_task_btf();
+ /* sleepable helper in rcu read lock region */
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ regs = (struct pt_regs *)bpf_task_pt_regs(real_parent);
+ if (!regs)
+ goto out;
+
+ ptr = (void *)PT_REGS_IP(regs);
+ (void)bpf_copy_from_user_task(&value, sizeof(uint32_t), ptr, task, 0);
+ user_data = value;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?lsm.s/bpf")
+int BPF_PROG(inproper_sleepable_kfunc, int cmd, union bpf_attr *attr, unsigned int size,
+ bool kernel)
+{
+ struct bpf_key *bkey;
+
+ /* sleepable kfunc in rcu read lock region */
+ bpf_rcu_read_lock();
+ bkey = bpf_lookup_user_key(key_serial, flags);
+ bpf_rcu_read_unlock();
+ if (!bkey)
+ return -1;
+ bpf_key_put(bkey);
+
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int nested_rcu_region(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ /* nested rcu read lock regions */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+out:
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int task_trusted_non_rcuptr(void *ctx)
+{
+ struct task_struct *task, *group_leader;
+
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ /* the pointer group_leader is explicitly marked as trusted */
+ group_leader = task->real_parent->group_leader;
+ (void)bpf_task_storage_get(&map_a, group_leader, 0, 0);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int task_untrusted_rcuptr(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ bpf_rcu_read_unlock();
+ /* helper use of rcu ptr outside the rcu read lock region */
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int cross_rcu_region(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+	/* rcu ptr is defined and used in different rcu read lock regions */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_lock();
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+__noinline
+static int static_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ if (bpf_get_prandom_u32())
+ return ret + 42;
+ return ret + bpf_get_prandom_u32();
+}
+
+__noinline
+int global_subprog(u64 a)
+{
+ volatile int ret = a;
+
+ return ret + static_subprog(NULL);
+}
+
+__noinline
+static int static_subprog_lock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ if (bpf_get_prandom_u32())
+ return ret + 42;
+ return ret + bpf_get_prandom_u32();
+}
+
+__noinline
+int global_subprog_lock(u64 a)
+{
+ volatile int ret = a;
+
+ return ret + static_subprog_lock(NULL);
+}
+
+__noinline
+static int static_subprog_unlock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_unlock();
+ if (bpf_get_prandom_u32())
+ return ret + 42;
+ return ret + bpf_get_prandom_u32();
+}
+
+__noinline
+int global_subprog_unlock(u64 a)
+{
+ volatile int ret = a;
+
+ return ret + static_subprog_unlock(NULL);
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ if (bpf_get_prandom_u32())
+ ret += static_subprog(ctx);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_global_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ if (bpf_get_prandom_u32())
+ ret += global_subprog(ret);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_subprog_lock(void *ctx)
+{
+ volatile int ret = 0;
+
+ ret += static_subprog_lock(ctx);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_global_subprog_lock(void *ctx)
+{
+ volatile int ret = 0;
+
+ ret += global_subprog_lock(ret);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_subprog_unlock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ ret += static_subprog_unlock(ctx);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_global_subprog_unlock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ ret += global_subprog_unlock(ret);
+ return 0;
+}
+
+int __noinline
+global_sleepable_helper_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user(&i, sizeof(i), NULL);
+ return i;
+}
+
+int __noinline
+global_sleepable_kfunc_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user_str(&i, sizeof(i), NULL, 0);
+ global_subprog(i);
+ return i;
+}
+
+int __noinline
+global_subprog_calling_sleepable_global(int i)
+{
+ if (!i)
+ global_sleepable_kfunc_subprog(i);
+ return i;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_sleepable_helper_global_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ ret += global_sleepable_helper_subprog(ret);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_sleepable_kfunc_global_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ ret += global_sleepable_kfunc_subprog(ret);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_sleepable_global_subprog_indirect(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ ret += global_subprog_calling_sleepable_global(ret);
+ bpf_rcu_read_unlock();
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c b/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c
new file mode 100644
index 000000000000..df4873558634
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct task_ls_map {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} task_ls_map SEC(".maps");
+
+long gp_seq;
+
+SEC("syscall")
+int do_call_rcu_tasks_trace(void *ctx)
+{
+ struct task_struct *current;
+ int *v;
+
+ current = bpf_get_current_task_btf();
+ v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!v)
+ return 1;
+ /* Invoke call_rcu_tasks_trace */
+ return bpf_task_storage_delete(&task_ls_map, current);
+}
+
+SEC("kprobe/rcu_tasks_trace_postgp")
+int rcu_tasks_trace_postgp(void *ctx)
+{
+ __sync_add_and_fetch(&gp_seq, 1);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c b/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c
new file mode 100644
index 000000000000..69da05bb6c63
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022. Huawei Technologies Co., Ltd */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+extern bool CONFIG_PREEMPTION __kconfig __weak;
+extern const int bpf_task_storage_busy __ksym;
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+int busy = 0;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} task SEC(".maps");
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(read_bpf_task_storage_busy)
+{
+ int *value;
+
+ if (!CONFIG_PREEMPTION)
+ return 0;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ value = bpf_this_cpu_ptr(&bpf_task_storage_busy);
+ if (value)
+ busy = *value;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c b/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c
new file mode 100644
index 000000000000..405adbe5e8b0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t target_pid = 0;
+
+char xattr_value[64];
+static const char expected_value_a[] = "bpf_selftest_value_a";
+static const char expected_value_b[] = "bpf_selftest_value_b";
+bool found_value_a;
+bool found_value_b;
+
+SEC("lsm.s/file_open")
+int BPF_PROG(test_file_open)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup_subsys_state *css, *tmp;
+ struct bpf_dynptr value_ptr;
+ struct cgroup *cgrp;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
+ return 0;
+
+ bpf_rcu_read_lock();
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp) {
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ css = &cgrp->self;
+ bpf_dynptr_from_mem(xattr_value, sizeof(xattr_value), 0, &value_ptr);
+ bpf_for_each(css, tmp, css, BPF_CGROUP_ITER_ANCESTORS_UP) {
+ int ret;
+
+ ret = bpf_cgroup_read_xattr(tmp->cgroup, "user.bpf_test",
+ &value_ptr);
+ if (ret < 0)
+ continue;
+
+ if (ret == sizeof(expected_value_a) &&
+ !bpf_strncmp(xattr_value, sizeof(expected_value_a), expected_value_a))
+ found_value_a = true;
+ if (ret == sizeof(expected_value_b) &&
+ !bpf_strncmp(xattr_value, sizeof(expected_value_b), expected_value_b))
+ found_value_b = true;
+ }
+
+ bpf_rcu_read_unlock();
+ bpf_cgroup_release(cgrp);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/read_vsyscall.c b/tools/testing/selftests/bpf/progs/read_vsyscall.c
new file mode 100644
index 000000000000..395591374d4f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/read_vsyscall.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024. Huawei Technologies Co., Ltd */
+#include "vmlinux.h"
+#include <linux/types.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+int target_pid = 0;
+void *user_ptr = 0;
+int read_ret[10];
+
+char _license[] SEC("license") = "GPL";
+
+/*
+ * These are kfuncs; the other read functions used below are helpers.
+ */
+int bpf_copy_from_user_str(void *dst, u32, const void *, u64) __weak __ksym;
+int bpf_copy_from_user_task_str(void *dst, u32, const void *,
+ struct task_struct *, u64) __weak __ksym;
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int do_probe_read(void *ctx)
+{
+ char buf[8];
+
+ if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
+ return 0;
+
+ read_ret[0] = bpf_probe_read_kernel(buf, sizeof(buf), user_ptr);
+ read_ret[1] = bpf_probe_read_kernel_str(buf, sizeof(buf), user_ptr);
+ read_ret[2] = bpf_probe_read(buf, sizeof(buf), user_ptr);
+ read_ret[3] = bpf_probe_read_str(buf, sizeof(buf), user_ptr);
+ read_ret[4] = bpf_probe_read_user(buf, sizeof(buf), user_ptr);
+ read_ret[5] = bpf_probe_read_user_str(buf, sizeof(buf), user_ptr);
+
+ return 0;
+}
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int do_copy_from_user(void *ctx)
+{
+ char buf[8];
+
+ if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
+ return 0;
+
+ read_ret[6] = bpf_copy_from_user(buf, sizeof(buf), user_ptr);
+ read_ret[7] = bpf_copy_from_user_task(buf, sizeof(buf), user_ptr,
+ bpf_get_current_task_btf(), 0);
+ read_ret[8] = bpf_copy_from_user_str((char *)buf, sizeof(buf), user_ptr, 0);
+ read_ret[9] = bpf_copy_from_user_task_str((char *)buf,
+ sizeof(buf),
+ user_ptr,
+ bpf_get_current_task_btf(),
+ 0);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/recursion.c b/tools/testing/selftests/bpf/progs/recursion.c
new file mode 100644
index 000000000000..3c2423bb19e2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/recursion.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, long);
+} hash1 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, long);
+} hash2 SEC(".maps");
+
+int pass1 = 0;
+int pass2 = 0;
+
+SEC("fentry/htab_map_delete_elem")
+int BPF_PROG(on_delete, struct bpf_map *map)
+{
+ int key = 0;
+
+ if (map == (void *)&hash1) {
+ pass1++;
+ return 0;
+ }
+ if (map == (void *)&hash2) {
+ pass2++;
+ bpf_map_delete_elem(&hash2, &key);
+ return 0;
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/recvmsg4_prog.c b/tools/testing/selftests/bpf/progs/recvmsg4_prog.c
new file mode 100644
index 000000000000..59748c95471a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/recvmsg4_prog.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <sys/socket.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include <bpf_sockopt_helpers.h>
+
+#define SERV4_IP 0xc0a801feU /* 192.168.1.254 */
+#define SERV4_PORT 4040
+
+SEC("cgroup/recvmsg4")
+int recvmsg4_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock *sk;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 1;
+
+ if (sk->family != AF_INET)
+ return 1;
+
+ if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
+ return 1;
+
+ if (!get_set_sk_priority(ctx))
+ return 1;
+
+ ctx->user_ip4 = bpf_htonl(SERV4_IP);
+ ctx->user_port = bpf_htons(SERV4_PORT);
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/recvmsg6_prog.c b/tools/testing/selftests/bpf/progs/recvmsg6_prog.c
new file mode 100644
index 000000000000..d9a4016596d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/recvmsg6_prog.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+#include <linux/in6.h>
+#include <sys/socket.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include <bpf_sockopt_helpers.h>
+
+#define SERV6_IP_0 0xfaceb00c /* face:b00c:1234:5678::abcd */
+#define SERV6_IP_1 0x12345678
+#define SERV6_IP_2 0x00000000
+#define SERV6_IP_3 0x0000abcd
+#define SERV6_PORT 6060
+
+SEC("cgroup/recvmsg6")
+int recvmsg6_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock *sk;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 1;
+
+ if (sk->family != AF_INET6)
+ return 1;
+
+ if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
+ return 1;
+
+ if (!get_set_sk_priority(ctx))
+ return 1;
+
+ ctx->user_ip6[0] = bpf_htonl(SERV6_IP_0);
+ ctx->user_ip6[1] = bpf_htonl(SERV6_IP_1);
+ ctx->user_ip6[2] = bpf_htonl(SERV6_IP_2);
+ ctx->user_ip6[3] = bpf_htonl(SERV6_IP_3);
+ ctx->user_port = bpf_htons(SERV6_PORT);
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c
new file mode 100644
index 000000000000..1c7ab44bccfa
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+__u8 SERVUN_ADDRESS[] = "\0bpf_cgroup_unix_test";
+
+SEC("cgroup/recvmsg_unix")
+int recvmsg_unix_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);
+ struct sockaddr_un *sa_kern_unaddr;
+ __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) +
+ sizeof(SERVUN_ADDRESS) - 1;
+ int ret;
+
+ ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_ADDRESS,
+ sizeof(SERVUN_ADDRESS) - 1);
+ if (ret)
+ return 1;
+
+ if (sa_kern->uaddrlen != unaddrlen)
+ return 1;
+
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
+ if (memcmp(sa_kern_unaddr->sun_path, SERVUN_ADDRESS,
+ sizeof(SERVUN_ADDRESS) - 1) != 0)
+ return 1;
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
new file mode 100644
index 000000000000..893a4fdb4b6e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+extern void bpf_rcu_read_lock(void) __ksym;
+extern void bpf_rcu_read_unlock(void) __ksym;
+
+struct node_data {
+ long key;
+ long list_data;
+ struct bpf_rb_node r;
+ struct bpf_list_node l;
+ struct bpf_refcount ref;
+};
+
+struct map_value {
+ struct node_data __kptr *node;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 2);
+} stashed_nodes SEC(".maps");
+
+struct node_acquire {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+ struct bpf_refcount refcount;
+};
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock lock;
+private(A) struct bpf_rb_root root __contains(node_data, r);
+private(A) struct bpf_list_head head __contains(node_data, l);
+
+private(B) struct bpf_spin_lock alock;
+private(B) struct bpf_rb_root aroot __contains(node_acquire, node);
+
+private(C) struct bpf_spin_lock block;
+private(C) struct bpf_rb_root broot __contains(node_data, r);
+
+static bool less(struct bpf_rb_node *node_a, const struct bpf_rb_node *node_b)
+{
+ struct node_data *a;
+ struct node_data *b;
+
+ a = container_of(node_a, struct node_data, r);
+ b = container_of(node_b, struct node_data, r);
+
+ return a->key < b->key;
+}
+
+static bool less_a(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_acquire *node_a;
+ struct node_acquire *node_b;
+
+ node_a = container_of(a, struct node_acquire, node);
+ node_b = container_of(b, struct node_acquire, node);
+
+ return node_a->key < node_b->key;
+}
+
+static long __insert_in_tree_and_list(struct bpf_list_head *head,
+ struct bpf_rb_root *root,
+ struct bpf_spin_lock *lock)
+{
+ struct node_data *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return -1;
+
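+	/* Take a second owning reference so the same node can be linked into
+	 * both the rbtree (via n) and the list (via m).
+	 */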
+ m = bpf_refcount_acquire(n);
+ m->key = 123;
+ m->list_data = 456;
+
+ bpf_spin_lock(lock);
+ if (bpf_rbtree_add(root, &n->r, less)) {
+ /* Failure to insert - unexpected */
+ bpf_spin_unlock(lock);
+ bpf_obj_drop(m);
+ return -2;
+ }
+ bpf_spin_unlock(lock);
+
+ bpf_spin_lock(lock);
+ if (bpf_list_push_front(head, &m->l)) {
+ /* Failure to insert - unexpected */
+ bpf_spin_unlock(lock);
+ return -3;
+ }
+ bpf_spin_unlock(lock);
+ return 0;
+}
+
+static long __stash_map_insert_tree(int idx, int val, struct bpf_rb_root *root,
+ struct bpf_spin_lock *lock)
+{
+ struct map_value *mapval;
+ struct node_data *n, *m;
+
+ mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
+ if (!mapval)
+ return -1;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return -2;
+
+ n->key = val;
+ m = bpf_refcount_acquire(n);
+
+ n = bpf_kptr_xchg(&mapval->node, n);
+ if (n) {
+ bpf_obj_drop(n);
+ bpf_obj_drop(m);
+ return -3;
+ }
+
+ bpf_spin_lock(lock);
+ if (bpf_rbtree_add(root, &m->r, less)) {
+ /* Failure to insert - unexpected */
+ bpf_spin_unlock(lock);
+ return -4;
+ }
+ bpf_spin_unlock(lock);
+ return 0;
+}
+
+static long __read_from_tree(struct bpf_rb_root *root,
+ struct bpf_spin_lock *lock,
+ bool remove_from_tree)
+{
+ struct bpf_rb_node *rb;
+ struct node_data *n;
+ long res = -99;
+
+ bpf_spin_lock(lock);
+
+ rb = bpf_rbtree_first(root);
+ if (!rb) {
+ bpf_spin_unlock(lock);
+ return -1;
+ }
+
+ n = container_of(rb, struct node_data, r);
+ res = n->key;
+
+ if (!remove_from_tree) {
+ bpf_spin_unlock(lock);
+ return res;
+ }
+
+ rb = bpf_rbtree_remove(root, rb);
+ bpf_spin_unlock(lock);
+ if (!rb)
+ return -2;
+ n = container_of(rb, struct node_data, r);
+ bpf_obj_drop(n);
+ return res;
+}
+
+static long __read_from_list(struct bpf_list_head *head,
+ struct bpf_spin_lock *lock,
+ bool remove_from_list)
+{
+ struct bpf_list_node *l;
+ struct node_data *n;
+ long res = -99;
+
+ bpf_spin_lock(lock);
+
+ l = bpf_list_pop_front(head);
+ if (!l) {
+ bpf_spin_unlock(lock);
+ return -1;
+ }
+
+ n = container_of(l, struct node_data, l);
+ res = n->list_data;
+
+ if (!remove_from_list) {
+ if (bpf_list_push_back(head, &n->l)) {
+ bpf_spin_unlock(lock);
+ return -2;
+ }
+ }
+
+ bpf_spin_unlock(lock);
+
+ if (remove_from_list)
+ bpf_obj_drop(n);
+ return res;
+}
+
+static long __read_from_unstash(int idx)
+{
+ struct node_data *n = NULL;
+ struct map_value *mapval;
+ long val = -99;
+
+ mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
+ if (!mapval)
+ return -1;
+
+ n = bpf_kptr_xchg(&mapval->node, n);
+ if (!n)
+ return -2;
+
+ val = n->key;
+ bpf_obj_drop(n);
+ return val;
+}
+
+#define INSERT_READ_BOTH(rem_tree, rem_list, desc) \
+SEC("tc") \
+__description(desc) \
+__success __retval(579) \
+long insert_and_remove_tree_##rem_tree##_list_##rem_list(void *ctx) \
+{ \
+ long err, tree_data, list_data; \
+ \
+ err = __insert_in_tree_and_list(&head, &root, &lock); \
+ if (err) \
+ return err; \
+ \
+ err = __read_from_tree(&root, &lock, rem_tree); \
+ if (err < 0) \
+ return err; \
+ else \
+ tree_data = err; \
+ \
+ err = __read_from_list(&head, &lock, rem_list); \
+ if (err < 0) \
+ return err; \
+ else \
+ list_data = err; \
+ \
+ return tree_data + list_data; \
+}
+
+/* After successful insert of struct node_data into both collections:
+ * - it should have refcount = 2
+ * - removing / not removing the node_data from a collection after
+ * reading should have no effect on ability to read / remove from
+ * the other collection
+ */
+INSERT_READ_BOTH(true, true, "insert_read_both: remove from tree + list");
+INSERT_READ_BOTH(false, false, "insert_read_both: remove from neither");
+INSERT_READ_BOTH(true, false, "insert_read_both: remove from tree");
+INSERT_READ_BOTH(false, true, "insert_read_both: remove from list");
+
+#undef INSERT_READ_BOTH
+#define INSERT_READ_BOTH(rem_tree, rem_list, desc) \
+SEC("tc") \
+__description(desc) \
+__success __retval(579) \
+long insert_and_remove_lf_tree_##rem_tree##_list_##rem_list(void *ctx) \
+{ \
+ long err, tree_data, list_data; \
+ \
+ err = __insert_in_tree_and_list(&head, &root, &lock); \
+ if (err) \
+ return err; \
+ \
+ err = __read_from_list(&head, &lock, rem_list); \
+ if (err < 0) \
+ return err; \
+ else \
+ list_data = err; \
+ \
+ err = __read_from_tree(&root, &lock, rem_tree); \
+ if (err < 0) \
+ return err; \
+ else \
+ tree_data = err; \
+ \
+ return tree_data + list_data; \
+}
+
+/* Similar to insert_read_both, but the list data is read and possibly
+ * removed first.
+ *
+ * Results should be no different from reading and possibly removing the
+ * rbtree node first.
+ */
+INSERT_READ_BOTH(true, true, "insert_read_both_list_first: remove from tree + list");
+INSERT_READ_BOTH(false, false, "insert_read_both_list_first: remove from neither");
+INSERT_READ_BOTH(true, false, "insert_read_both_list_first: remove from tree");
+INSERT_READ_BOTH(false, true, "insert_read_both_list_first: remove from list");
+
+#define INSERT_DOUBLE_READ_AND_DEL(read_fn, read_root, desc) \
+SEC("tc") \
+__description(desc) \
+__success __retval(-1) \
+long insert_double_##read_fn##_and_del_##read_root(void *ctx) \
+{ \
+ long err, list_data; \
+ \
+ err = __insert_in_tree_and_list(&head, &root, &lock); \
+ if (err) \
+ return err; \
+ \
+ err = read_fn(&read_root, &lock, true); \
+ if (err < 0) \
+ return err; \
+ else \
+ list_data = err; \
+ \
+ err = read_fn(&read_root, &lock, true); \
+ if (err < 0) \
+ return err; \
+ \
+ return err + list_data; \
+}
+
+/* Insert into both tree and list, then read-and-remove from one of them twice.
+ *
+ * The second read-and-remove should fail at the read step since the node has
+ * already been removed.
+ */
+INSERT_DOUBLE_READ_AND_DEL(__read_from_tree, root, "insert_double_del: 2x read-and-del from tree");
+INSERT_DOUBLE_READ_AND_DEL(__read_from_list, head, "insert_double_del: 2x read-and-del from list");
+
+#define INSERT_STASH_READ(rem_tree, desc) \
+SEC("tc") \
+__description(desc) \
+__success __retval(84) \
+long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx) \
+{ \
+ long err, tree_data, map_data; \
+ \
+ err = __stash_map_insert_tree(0, 42, &root, &lock); \
+ if (err) \
+ return err; \
+ \
+ err = __read_from_tree(&root, &lock, rem_tree); \
+ if (err < 0) \
+ return err; \
+ else \
+ tree_data = err; \
+ \
+ err = __read_from_unstash(0); \
+ if (err < 0) \
+ return err; \
+ else \
+ map_data = err; \
+ \
+ return tree_data + map_data; \
+}
+
+/* Stash a refcounted node in map_val and insert the same node into the tree,
+ * then read data from the tree and then from the unstashed map_val, possibly
+ * removing the node from the tree.
+ *
+ * Removing from the tree should have no effect on the validity of the
+ * map_val kptr.
+ */
+INSERT_STASH_READ(true, "insert_stash_read: remove from tree");
+INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree");
+
+SEC("tc")
+__success
+long rbtree_refcounted_node_ref_escapes(void *ctx)
+{
+ struct node_acquire *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&alock);
+ bpf_rbtree_add(&aroot, &n->node, less_a);
+ m = bpf_refcount_acquire(n);
+ bpf_spin_unlock(&alock);
+ if (!m)
+ return 2;
+
+ m->key = 2;
+ bpf_obj_drop(m);
+ return 0;
+}
+
+SEC("tc")
+__success
+long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
+{
+ struct node_acquire *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ m = bpf_refcount_acquire(n);
+ m->key = 2;
+
+ bpf_spin_lock(&alock);
+ bpf_rbtree_add(&aroot, &n->node, less_a);
+ bpf_spin_unlock(&alock);
+
+ bpf_obj_drop(m);
+
+ return 0;
+}
+
+static long __stash_map_empty_xchg(struct node_data *n, int idx)
+{
+ struct map_value *mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
+
+ if (!mapval) {
+ bpf_obj_drop(n);
+ return 1;
+ }
+ n = bpf_kptr_xchg(&mapval->node, n);
+ if (n) {
+ bpf_obj_drop(n);
+ return 2;
+ }
+ return 0;
+}
+
+SEC("tc")
+long rbtree_wrong_owner_remove_fail_a1(void *ctx)
+{
+ struct node_data *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+ m = bpf_refcount_acquire(n);
+
+ if (__stash_map_empty_xchg(n, 0)) {
+ bpf_obj_drop(m);
+ return 2;
+ }
+
+ if (__stash_map_empty_xchg(m, 1))
+ return 3;
+
+ return 0;
+}
+
+SEC("tc")
+long rbtree_wrong_owner_remove_fail_b(void *ctx)
+{
+ struct map_value *mapval;
+ struct node_data *n;
+ int idx = 0;
+
+ mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ n = bpf_kptr_xchg(&mapval->node, NULL);
+ if (!n)
+ return 2;
+
+ bpf_spin_lock(&block);
+
+ bpf_rbtree_add(&broot, &n->r, less);
+
+ bpf_spin_unlock(&block);
+ return 0;
+}
+
+SEC("tc")
+long rbtree_wrong_owner_remove_fail_a2(void *ctx)
+{
+ struct map_value *mapval;
+ struct bpf_rb_node *res;
+ struct node_data *m;
+ int idx = 1;
+
+ mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ m = bpf_kptr_xchg(&mapval->node, NULL);
+ if (!m)
+ return 2;
+ bpf_spin_lock(&lock);
+
+ /* make m non-owning ref */
+ bpf_list_push_back(&head, &m->l);
+ res = bpf_rbtree_remove(&root, &m->r);
+
+ bpf_spin_unlock(&lock);
+ if (res) {
+ bpf_obj_drop(container_of(res, struct node_data, r));
+ return 3;
+ }
+ return 0;
+}
+
+SEC("?fentry.s/bpf_testmod_test_read")
+__success
+int BPF_PROG(rbtree_sleepable_rcu,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ struct bpf_rb_node *rb;
+ struct node_data *n, *m = NULL;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 0;
+
+ bpf_rcu_read_lock();
+ bpf_spin_lock(&lock);
+ bpf_rbtree_add(&root, &n->r, less);
+ rb = bpf_rbtree_first(&root);
+ if (!rb)
+ goto err_out;
+
+ rb = bpf_rbtree_remove(&root, rb);
+ if (!rb)
+ goto err_out;
+
+ m = container_of(rb, struct node_data, r);
+
+err_out:
+ bpf_spin_unlock(&lock);
+ bpf_rcu_read_unlock();
+ if (m)
+ bpf_obj_drop(m);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_testmod_test_read")
+__success
+int BPF_PROG(rbtree_sleepable_rcu_no_explicit_rcu_lock,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ struct bpf_rb_node *rb;
+ struct node_data *n, *m = NULL;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 0;
+
+ /* No explicit bpf_rcu_read_lock */
+ bpf_spin_lock(&lock);
+ bpf_rbtree_add(&root, &n->r, less);
+ rb = bpf_rbtree_first(&root);
+ if (!rb)
+ goto err_out;
+
+ rb = bpf_rbtree_remove(&root, rb);
+ if (!rb)
+ goto err_out;
+
+ m = container_of(rb, struct node_data, r);
+
+err_out:
+ bpf_spin_unlock(&lock);
+ /* No explicit bpf_rcu_read_unlock */
+ if (m)
+ bpf_obj_drop(m);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
new file mode 100644
index 000000000000..836c8ab7b908
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+struct node_acquire {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+ struct bpf_refcount refcount;
+};
+
+extern void bpf_rcu_read_lock(void) __ksym;
+extern void bpf_rcu_read_unlock(void) __ksym;
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_acquire, node);
+
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_acquire *node_a;
+ struct node_acquire *node_b;
+
+ node_a = container_of(a, struct node_acquire, node);
+ node_b = container_of(b, struct node_acquire, node);
+
+ return node_a->key < node_b->key;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference id=4 alloc_insn={{[0-9]+}}")
+long rbtree_refcounted_node_ref_escapes(void *ctx)
+{
+ struct node_acquire *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ /* m becomes an owning ref but is never drop'd or added to a tree */
+ m = bpf_refcount_acquire(n);
+ bpf_spin_unlock(&glock);
+ if (!m)
+ return 2;
+
+ m->key = 2;
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+long refcount_acquire_maybe_null(void *ctx)
+{
+ struct node_acquire *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ /* Intentionally not testing !n
+ * it's MAYBE_NULL for refcount_acquire
+ */
+ m = bpf_refcount_acquire(n);
+ if (m)
+ bpf_obj_drop(m);
+ if (n)
+ bpf_obj_drop(n);
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference id=3 alloc_insn={{[0-9]+}}")
+long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
+{
+ struct node_acquire *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ /* m becomes an owning ref but is never drop'd or added to a tree */
+ m = bpf_refcount_acquire(n);
+ m->key = 2;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?fentry.s/bpf_testmod_test_read")
+__failure __msg("function calls are not allowed while holding a lock")
+int BPF_PROG(rbtree_fail_sleepable_lock_across_rcu,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ struct node_acquire *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 0;
+
+ /* spin_{lock,unlock} are in different RCU CS */
+ bpf_rcu_read_lock();
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_rcu_read_unlock();
+
+ bpf_rcu_read_lock();
+ bpf_spin_unlock(&glock);
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/res_spin_lock.c b/tools/testing/selftests/bpf/progs/res_spin_lock.c
new file mode 100644
index 000000000000..22c4fb8b9266
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/res_spin_lock.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024-2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define EDEADLK 35
+#define ETIMEDOUT 110
+
+struct arr_elem {
+ struct bpf_res_spin_lock lock;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 64);
+ __type(key, int);
+ __type(value, struct arr_elem);
+} arrmap SEC(".maps");
+
+struct bpf_res_spin_lock lockA __hidden SEC(".data.A");
+struct bpf_res_spin_lock lockB __hidden SEC(".data.B");
+
+SEC("tc")
+int res_spin_lock_test(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem1, *elem2;
+ int r;
+
+ elem1 = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem1)
+ return -1;
+ elem2 = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem2)
+ return -1;
+
+ r = bpf_res_spin_lock(&elem1->lock);
+ if (r)
+ return r;
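+	/* elem1 and elem2 were looked up with the same key (0), so they alias
+	 * the same lock; this nested acquisition is expected to fail with
+	 * -EDEADLK (AA deadlock).
+	 */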
+ r = bpf_res_spin_lock(&elem2->lock);
+ if (!r) {
+ bpf_res_spin_unlock(&elem2->lock);
+ bpf_res_spin_unlock(&elem1->lock);
+ return -1;
+ }
+ bpf_res_spin_unlock(&elem1->lock);
+ return r != -EDEADLK;
+}
+
+SEC("tc")
+int res_spin_lock_test_AB(struct __sk_buff *ctx)
+{
+ int r;
+
+ r = bpf_res_spin_lock(&lockA);
+ if (r)
+ return !r;
+ /* Only unlock if we took the lock. */
+ if (!bpf_res_spin_lock(&lockB))
+ bpf_res_spin_unlock(&lockB);
+ bpf_res_spin_unlock(&lockA);
+ return 0;
+}
+
+int err;
+
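+/* Mirror image of res_spin_lock_test_AB above, taking the locks in B->A
+ * order; presumably the userspace test runs both programs so the conflicting
+ * orderings can race, and a detected deadlock is reported through err.
+ */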
+SEC("tc")
+int res_spin_lock_test_BA(struct __sk_buff *ctx)
+{
+ int r;
+
+ r = bpf_res_spin_lock(&lockB);
+ if (r)
+ return !r;
+ if (!bpf_res_spin_lock(&lockA))
+ bpf_res_spin_unlock(&lockA);
+ else
+ err = -EDEADLK;
+ bpf_res_spin_unlock(&lockB);
+ return err ?: 0;
+}
+
+SEC("tc")
+int res_spin_lock_test_held_lock_max(struct __sk_buff *ctx)
+{
+ struct bpf_res_spin_lock *locks[48] = {};
+ struct arr_elem *e;
+ u64 time_beg, time;
+ int ret = 0, i;
+
+ _Static_assert(ARRAY_SIZE(((struct rqspinlock_held){}).locks) == 31,
+ "RES_NR_HELD assumed to be 31");
+
+ for (i = 0; i < 34; i++) {
+ int key = i;
+
+		/* We cannot pass in i directly, as it will get spilled/filled by the
+		 * compiler and lose its bounds in the verifier state.
+		 */
+ e = bpf_map_lookup_elem(&arrmap, &key);
+ if (!e)
+ return 1;
+ locks[i] = &e->lock;
+ }
+
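+	/* The remaining slots alias earlier elements (key = i - 2), so
+	 * locks[34] is the same lock as locks[32] and acquiring it again
+	 * below is an AA case.
+	 */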
+ for (; i < 48; i++) {
+ int key = i - 2;
+
+		/* We cannot pass in i directly, as it will get spilled/filled by the
+		 * compiler and lose its bounds in the verifier state.
+		 */
+ e = bpf_map_lookup_elem(&arrmap, &key);
+ if (!e)
+ return 1;
+ locks[i] = &e->lock;
+ }
+
+ time_beg = bpf_ktime_get_ns();
+ for (i = 0; i < 34; i++) {
+ if (bpf_res_spin_lock(locks[i]))
+ goto end;
+ }
+
+	/* Trigger AA after exhausting the entries in the held lock table. This
+	 * time, only the timeout can save us, as AA detection won't succeed.
+	 */
+ ret = bpf_res_spin_lock(locks[34]);
+ if (!ret) {
+ bpf_res_spin_unlock(locks[34]);
+ ret = 1;
+ goto end;
+ }
+
+ ret = ret != -ETIMEDOUT ? 2 : 0;
+
+end:
+ for (i = i - 1; i >= 0; i--)
+ bpf_res_spin_unlock(locks[i]);
+ time = bpf_ktime_get_ns() - time_beg;
+	/* The time spent should be easily above our limit (1/4 s), since AA
+	 * detection won't be expedited due to the lack of a held lock entry.
+	 */
+ return ret ?: (time > 1000000000 / 4 ? 0 : 1);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/res_spin_lock_fail.c b/tools/testing/selftests/bpf/progs/res_spin_lock_fail.c
new file mode 100644
index 000000000000..330682a88c16
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/res_spin_lock_fail.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024-2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct arr_elem {
+ struct bpf_res_spin_lock lock;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct arr_elem);
+} arrmap SEC(".maps");
+
+long value;
+
+struct bpf_spin_lock lock __hidden SEC(".data.A");
+struct bpf_res_spin_lock res_lock __hidden SEC(".data.B");
+
+SEC("?tc")
+__failure __msg("point to map value or allocated object")
+int res_spin_lock_arg(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ bpf_res_spin_lock((struct bpf_res_spin_lock *)bpf_core_cast(&elem->lock, struct __sk_buff));
+ bpf_res_spin_lock(&elem->lock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("AA deadlock detected")
+int res_spin_lock_AA(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ bpf_res_spin_lock(&elem->lock);
+ bpf_res_spin_lock(&elem->lock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("AA deadlock detected")
+int res_spin_lock_cond_AA(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ if (bpf_res_spin_lock(&elem->lock))
+ return 0;
+ bpf_res_spin_lock(&elem->lock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("unlock of different lock")
+int res_spin_lock_mismatch_1(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ if (bpf_res_spin_lock(&elem->lock))
+ return 0;
+ bpf_res_spin_unlock(&res_lock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("unlock of different lock")
+int res_spin_lock_mismatch_2(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ if (bpf_res_spin_lock(&res_lock))
+ return 0;
+ bpf_res_spin_unlock(&elem->lock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("unlock of different lock")
+int res_spin_lock_irq_mismatch_1(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+ unsigned long f1;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ bpf_local_irq_save(&f1);
+ if (bpf_res_spin_lock(&res_lock))
+ return 0;
+ bpf_res_spin_unlock_irqrestore(&res_lock, &f1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("unlock of different lock")
+int res_spin_lock_irq_mismatch_2(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+ unsigned long f1;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ if (bpf_res_spin_lock_irqsave(&res_lock, &f1))
+ return 0;
+ bpf_res_spin_unlock(&res_lock);
+ return 0;
+}
+
+SEC("?tc")
+__success
+int res_spin_lock_ooo(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ if (bpf_res_spin_lock(&res_lock))
+ return 0;
+ if (bpf_res_spin_lock(&elem->lock)) {
+ bpf_res_spin_unlock(&res_lock);
+ return 0;
+ }
+ bpf_res_spin_unlock(&elem->lock);
+ bpf_res_spin_unlock(&res_lock);
+ return 0;
+}
+
+SEC("?tc")
+__success
+int res_spin_lock_ooo_irq(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+ unsigned long f1, f2;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ if (bpf_res_spin_lock_irqsave(&res_lock, &f1))
+ return 0;
+ if (bpf_res_spin_lock_irqsave(&elem->lock, &f2)) {
+ bpf_res_spin_unlock_irqrestore(&res_lock, &f1);
+		/* We won't have an unreleased IRQ flag error here. */
+ return 0;
+ }
+ bpf_res_spin_unlock_irqrestore(&elem->lock, &f2);
+ bpf_res_spin_unlock_irqrestore(&res_lock, &f1);
+ return 0;
+}
+
+struct bpf_res_spin_lock lock1 __hidden SEC(".data.OO1");
+struct bpf_res_spin_lock lock2 __hidden SEC(".data.OO2");
+
+SEC("?tc")
+__failure __msg("bpf_res_spin_unlock cannot be out of order")
+int res_spin_lock_ooo_unlock(struct __sk_buff *ctx)
+{
+ if (bpf_res_spin_lock(&lock1))
+ return 0;
+ if (bpf_res_spin_lock(&lock2)) {
+ bpf_res_spin_unlock(&lock1);
+ return 0;
+ }
+ bpf_res_spin_unlock(&lock1);
+ bpf_res_spin_unlock(&lock2);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("off 1 doesn't point to 'struct bpf_res_spin_lock' that is at 0")
+int res_spin_lock_bad_off(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ bpf_res_spin_lock((void *)&elem->lock + 1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("R1 doesn't have constant offset. bpf_res_spin_lock has to be at the constant offset")
+int res_spin_lock_var_off(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+ u64 val = value;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem) {
+		// FIXME: Using only the inline assembly in the assert macro
+		// does not emit a BTF definition.
+ bpf_throw(0);
+ return 0;
+ }
+ bpf_assert_range(val, 0, 40);
+ bpf_res_spin_lock((void *)&value + val);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("map 'res_spin.bss' has no valid bpf_res_spin_lock")
+int res_spin_lock_no_lock_map(struct __sk_buff *ctx)
+{
+ bpf_res_spin_lock((void *)&value + 1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("local 'kptr' has no valid bpf_res_spin_lock")
+int res_spin_lock_no_lock_kptr(struct __sk_buff *ctx)
+{
+ struct { int i; } *p = bpf_obj_new(typeof(*p));
+
+ if (!p)
+ return 0;
+ bpf_res_spin_lock((void *)p);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/ringbuf_bench.c b/tools/testing/selftests/bpf/progs/ringbuf_bench.c
index 123607d314d6..6a468496f539 100644
--- a/tools/testing/selftests/bpf/progs/ringbuf_bench.c
+++ b/tools/testing/selftests/bpf/progs/ringbuf_bench.c
@@ -4,6 +4,7 @@
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
@@ -30,7 +31,7 @@ static __always_inline long get_flags()
return sz >= wakeup_data_size ? BPF_RB_FORCE_WAKEUP : BPF_RB_NO_WAKEUP;
}
-SEC("fentry/__x64_sys_getpgid")
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
int bench_ringbuf(void *ctx)
{
long *sample, flags;
diff --git a/tools/testing/selftests/bpf/progs/sample_map_ret0.c b/tools/testing/selftests/bpf/progs/sample_map_ret0.c
deleted file mode 100644
index 1612a32007b6..000000000000
--- a/tools/testing/selftests/bpf/progs/sample_map_ret0.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-
-struct bpf_map_def SEC("maps") htab = {
- .type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(__u32),
- .value_size = sizeof(long),
- .max_entries = 2,
-};
-
-struct bpf_map_def SEC("maps") array = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(long),
- .max_entries = 2,
-};
-
-/* Sample program which should always load for testing control paths. */
-SEC(".text") int func()
-{
- __u64 key64 = 0;
- __u32 key = 0;
- long *value;
-
- value = bpf_map_lookup_elem(&htab, &key);
- if (!value)
- return 1;
- value = bpf_map_lookup_elem(&array, &key64);
- if (!value)
- return 1;
-
- return 0;
-}
diff --git a/tools/testing/selftests/bpf/progs/sample_ret0.c b/tools/testing/selftests/bpf/progs/sample_ret0.c
deleted file mode 100644
index fec99750d6ea..000000000000
--- a/tools/testing/selftests/bpf/progs/sample_ret0.c
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
-
-/* Sample program which should always load for testing control paths. */
-int func()
-{
- return 0;
-}
diff --git a/tools/testing/selftests/bpf/progs/security_bpf_map.c b/tools/testing/selftests/bpf/progs/security_bpf_map.c
new file mode 100644
index 000000000000..7216b3450e96
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/security_bpf_map.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define EPERM 1 /* Operation not permitted */
+
+/* From include/linux/fs.h. */
+#define FMODE_WRITE 0x2
+
+struct map;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1);
+} prot_status_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 3);
+} prot_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 3);
+} not_prot_map SEC(".maps");
+
+SEC("fmod_ret/security_bpf_map")
+int BPF_PROG(fmod_bpf_map, struct bpf_map *map, int fmode)
+{
+ __u32 key = 0;
+ __u32 *status_ptr = bpf_map_lookup_elem(&prot_status_map, &key);
+
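+	/* Enforcement is enabled only once prot_status_map[0] is set to a
+	 * non-zero value; until then every map access is allowed.
+	 */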
+ if (!status_ptr || !*status_ptr)
+ return 0;
+
+ if (map == &prot_map) {
+ /* Allow read-only access */
+ if (fmode & FMODE_WRITE)
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+/*
+ * This program keeps references to the maps so that the compiler does
+ * not optimize them out.
+ */
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(fentry_dummy1, int a)
+{
+ __u32 key = 0;
+ __u32 val1 = a;
+ __u32 val2 = a + 1;
+
+ bpf_map_update_elem(&prot_map, &key, &val1, BPF_ANY);
+ bpf_map_update_elem(&not_prot_map, &key, &val2, BPF_ANY);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/sendmsg4_prog.c b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c
index 092d9da536f3..edc159598a0e 100644
--- a/tools/testing/selftests/bpf/progs/sendmsg4_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c
@@ -8,6 +8,8 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include <bpf_sockopt_helpers.h>
+
#define SRC1_IP4 0xAC100001U /* 172.16.0.1 */
#define SRC2_IP4 0x00000000U
#define SRC_REWRITE_IP4 0x7f000004U
@@ -16,14 +18,15 @@
#define DST_PORT 4040
#define DST_REWRITE_PORT4 4444
-int _version SEC("version") = 1;
-
SEC("cgroup/sendmsg4")
int sendmsg_v4_prog(struct bpf_sock_addr *ctx)
{
if (ctx->type != SOCK_DGRAM)
return 0;
+ if (!get_set_sk_priority(ctx))
+ return 0;
+
/* Rewrite source. */
if (ctx->msg_src_ip4 == bpf_htonl(SRC1_IP4) ||
ctx->msg_src_ip4 == bpf_htonl(SRC2_IP4)) {
@@ -46,4 +49,10 @@ int sendmsg_v4_prog(struct bpf_sock_addr *ctx)
return 1;
}
+SEC("cgroup/sendmsg4")
+int sendmsg_v4_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
index 255a432bc163..36a7f960799f 100644
--- a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
@@ -8,6 +8,8 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include <bpf_sockopt_helpers.h>
+
#define SRC_REWRITE_IP6_0 0
#define SRC_REWRITE_IP6_1 0
#define SRC_REWRITE_IP6_2 0
@@ -18,9 +20,12 @@
#define DST_REWRITE_IP6_2 0
#define DST_REWRITE_IP6_3 1
-#define DST_REWRITE_PORT6 6666
+#define DST_REWRITE_IP6_V4_MAPPED_0 0
+#define DST_REWRITE_IP6_V4_MAPPED_1 0
+#define DST_REWRITE_IP6_V4_MAPPED_2 0x0000FFFF
+#define DST_REWRITE_IP6_V4_MAPPED_3 0xc0a80004 // 192.168.0.4
-int _version SEC("version") = 1;
+#define DST_REWRITE_PORT6 6666
SEC("cgroup/sendmsg6")
int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
@@ -28,6 +33,9 @@ int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
if (ctx->type != SOCK_DGRAM)
return 0;
+ if (!get_set_sk_priority(ctx))
+ return 0;
+
/* Rewrite source. */
if (ctx->msg_src_ip6[3] == bpf_htonl(1) ||
ctx->msg_src_ip6[3] == bpf_htonl(0)) {
@@ -56,4 +64,56 @@ int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
return 1;
}
+SEC("cgroup/sendmsg6")
+int sendmsg_v6_v4mapped_prog(struct bpf_sock_addr *ctx)
+{
+ /* Rewrite source. */
+ ctx->msg_src_ip6[0] = bpf_htonl(SRC_REWRITE_IP6_0);
+ ctx->msg_src_ip6[1] = bpf_htonl(SRC_REWRITE_IP6_1);
+ ctx->msg_src_ip6[2] = bpf_htonl(SRC_REWRITE_IP6_2);
+ ctx->msg_src_ip6[3] = bpf_htonl(SRC_REWRITE_IP6_3);
+
+ /* Rewrite destination. */
+ ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_V4_MAPPED_0);
+ ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_V4_MAPPED_1);
+ ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_V4_MAPPED_2);
+ ctx->user_ip6[3] = bpf_htonl(DST_REWRITE_IP6_V4_MAPPED_3);
+
+ ctx->user_port = bpf_htons(DST_REWRITE_PORT6);
+
+ return 1;
+}
+
+SEC("cgroup/sendmsg6")
+int sendmsg_v6_wildcard_prog(struct bpf_sock_addr *ctx)
+{
+ /* Rewrite source. */
+ ctx->msg_src_ip6[0] = bpf_htonl(SRC_REWRITE_IP6_0);
+ ctx->msg_src_ip6[1] = bpf_htonl(SRC_REWRITE_IP6_1);
+ ctx->msg_src_ip6[2] = bpf_htonl(SRC_REWRITE_IP6_2);
+ ctx->msg_src_ip6[3] = bpf_htonl(SRC_REWRITE_IP6_3);
+
+ /* Rewrite destination. */
+ ctx->user_ip6[0] = bpf_htonl(0);
+ ctx->user_ip6[1] = bpf_htonl(0);
+ ctx->user_ip6[2] = bpf_htonl(0);
+ ctx->user_ip6[3] = bpf_htonl(0);
+
+ ctx->user_port = bpf_htons(DST_REWRITE_PORT6);
+
+ return 1;
+}
+
+SEC("cgroup/sendmsg6")
+int sendmsg_v6_preserve_dst_prog(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/sendmsg6")
+int sendmsg_v6_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c
new file mode 100644
index 000000000000..332d0eb1116f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite";
+
+SEC("cgroup/sendmsg_unix")
+int sendmsg_unix_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);
+ struct sockaddr_un *sa_kern_unaddr;
+ __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) +
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1;
+ int ret;
+
+ /* Rewrite destination. */
+ ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1);
+ if (ret)
+ return 0;
+
+ if (sa_kern->uaddrlen != unaddrlen)
+ return 0;
+
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
+ if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
+ return 0;
+
+ return 1;
+}
+
+SEC("cgroup/sendmsg_unix")
+int sendmsg_unix_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/set_global_vars.c b/tools/testing/selftests/bpf/progs/set_global_vars.c
new file mode 100644
index 000000000000..ebaef28b2cb3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/set_global_vars.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include "bpf_experimental.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include <stdbool.h>
+
+char _license[] SEC("license") = "GPL";
+
+typedef __s32 s32;
+typedef s32 i32;
+typedef __u8 u8;
+
+enum Enum { EA1 = 0, EA2 = 11, EA3 = 10 };
+enum Enumu64 {EB1 = 0llu, EB2 = 12llu };
+enum Enums64 { EC1 = 0ll, EC2 = 13ll };
+
+const volatile __s64 var_s64 = -1;
+const volatile __u64 var_u64 = 0;
+const volatile i32 var_s32 = -1;
+const volatile __u32 var_u32 = 0;
+const volatile __s16 var_s16 = -1;
+const volatile __u16 var_u16 = 0;
+const volatile __s8 var_s8 = -1;
+const volatile u8 var_u8 = 0;
+const volatile enum Enum var_ea = EA1;
+const volatile enum Enumu64 var_eb = EB1;
+const volatile enum Enums64 var_ec = EC1;
+const volatile bool var_b = false;
+const volatile i32 arr[32];
+const volatile enum Enum enum_arr[32];
+const volatile i32 three_d[47][19][17];
+const volatile i32 *ptr_arr[32];
+
+struct Struct {
+ int:16;
+ __u16 filler;
+ struct {
+ const __u16 filler2;
+ };
+ struct Struct2 {
+ __u16 filler;
+ volatile struct {
+ const int:1;
+ union {
+ const volatile u8 var_u8[3];
+ const volatile __s16 filler3;
+ const int:1;
+ s32 mat[7][5];
+ } u;
+ };
+ } struct2[2][4];
+};
+
+const volatile __u32 stru = 0; /* same prefix as below */
+const volatile struct Struct struct1[3];
+const volatile struct Struct struct11[11][7];
+
+struct Struct3 {
+ struct {
+ u8 var_u8_l;
+ };
+ struct {
+ struct {
+ u8 var_u8_h;
+ };
+ };
+};
+
+typedef struct Struct3 Struct3_t;
+
+union Union {
+ __u16 var_u16;
+ Struct3_t struct3;
+};
+
+const volatile union Union union1 = {.var_u16 = -1};
+
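+/* Reading every global into a volatile local keeps the variables (and their
+ * BTF) referenced; the userspace side presumably rewrites them before load
+ * and checks the values the program observes.
+ */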
+SEC("socket")
+int test_set_globals(void *ctx)
+{
+ volatile __s8 a;
+
+ a = var_s64;
+ a = var_u64;
+ a = var_s32;
+ a = var_u32;
+ a = var_s16;
+ a = var_u16;
+ a = var_s8;
+ a = var_u8;
+ a = var_ea;
+ a = var_eb;
+ a = var_ec;
+ a = var_b;
+ a = struct1[2].struct2[1][2].u.var_u8[2];
+ a = union1.var_u16;
+ a = arr[3];
+ a = arr[EA2];
+ a = enum_arr[EC2];
+ a = three_d[31][7][EA2];
+ a = struct1[2].struct2[1][2].u.mat[5][3];
+ a = struct11[7][5].struct2[0][1].u.mat[3][0];
+
+ return a;
+}
diff --git a/tools/testing/selftests/bpf/progs/setget_sockopt.c b/tools/testing/selftests/bpf/progs/setget_sockopt.c
new file mode 100644
index 000000000000..d330b1511979
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/setget_sockopt.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+extern unsigned long CONFIG_HZ __kconfig;
+
+const volatile char veth[IFNAMSIZ];
+const volatile int veth_ifindex;
+
+int nr_listen;
+int nr_passive;
+int nr_active;
+int nr_connect;
+int nr_binddev;
+int nr_socket_post_create;
+int nr_fin_wait1;
+
+struct sockopt_test {
+ int opt;
+ int new;
+ int restore;
+ int expected;
+ int tcp_expected;
+ unsigned int flip:1;
+};
+
+static const char not_exist_cc[] = "not_exist";
+static const char cubic_cc[] = "cubic";
+static const char reno_cc[] = "reno";
+
+static const struct sockopt_test sol_socket_tests[] = {
+ { .opt = SO_REUSEADDR, .flip = 1, },
+ { .opt = SO_SNDBUF, .new = 8123, .expected = 8123 * 2, },
+ { .opt = SO_RCVBUF, .new = 8123, .expected = 8123 * 2, },
+ { .opt = SO_KEEPALIVE, .flip = 1, },
+ { .opt = SO_PRIORITY, .new = 0xeb9f, .expected = 0xeb9f, },
+ { .opt = SO_REUSEPORT, .flip = 1, },
+ { .opt = SO_RCVLOWAT, .new = 8123, .expected = 8123, },
+ { .opt = SO_MARK, .new = 0xeb9f, .expected = 0xeb9f, },
+ { .opt = SO_MAX_PACING_RATE, .new = 0xeb9f, .expected = 0xeb9f, },
+ { .opt = SO_TXREHASH, .flip = 1, },
+ { .opt = 0, },
+};
+
+static const struct sockopt_test sol_tcp_tests[] = {
+ { .opt = TCP_NODELAY, .flip = 1, },
+ { .opt = TCP_KEEPIDLE, .new = 123, .expected = 123, .restore = 321, },
+ { .opt = TCP_KEEPINTVL, .new = 123, .expected = 123, .restore = 321, },
+ { .opt = TCP_KEEPCNT, .new = 123, .expected = 123, .restore = 124, },
+ { .opt = TCP_SYNCNT, .new = 123, .expected = 123, .restore = 124, },
+ { .opt = TCP_WINDOW_CLAMP, .new = 8123, .expected = 8123, .restore = 8124, },
+ { .opt = TCP_CONGESTION, },
+ { .opt = TCP_THIN_LINEAR_TIMEOUTS, .flip = 1, },
+ { .opt = TCP_USER_TIMEOUT, .new = 123400, .expected = 123400, },
+ { .opt = TCP_NOTSENT_LOWAT, .new = 1314, .expected = 1314, },
+ { .opt = TCP_BPF_SOCK_OPS_CB_FLAGS, .new = BPF_SOCK_OPS_ALL_CB_FLAGS,
+ .expected = BPF_SOCK_OPS_ALL_CB_FLAGS, },
+ { .opt = TCP_BPF_DELACK_MAX, .new = 30000, .expected = 30000, },
+ { .opt = TCP_BPF_RTO_MIN, .new = 30000, .expected = 30000, },
+ { .opt = TCP_RTO_MAX_MS, .new = 2000, .expected = 2000, },
+ { .opt = 0, },
+};
+
+static const struct sockopt_test sol_ip_tests[] = {
+ { .opt = IP_TOS, .new = 0xe1, .expected = 0xe1, .tcp_expected = 0xe0, },
+ { .opt = 0, },
+};
+
+static const struct sockopt_test sol_ipv6_tests[] = {
+ { .opt = IPV6_TCLASS, .new = 0xe1, .expected = 0xe1, .tcp_expected = 0xe0, },
+ { .opt = IPV6_AUTOFLOWLABEL, .flip = 1, },
+ { .opt = 0, },
+};
+
+struct loop_ctx {
+ void *ctx;
+ struct sock *sk;
+};
+
+static bool sk_is_tcp(struct sock *sk)
+{
+ return (sk->__sk_common.skc_family == AF_INET ||
+ sk->__sk_common.skc_family == AF_INET6) &&
+ sk->sk_type == SOCK_STREAM &&
+ sk->sk_protocol == IPPROTO_TCP;
+}
+
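+/* For boolean options: read the current value, write its negation, verify
+ * the readback, then restore the original. Returns 0 on success, 1 on any
+ * mismatch or helper failure.
+ */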
+static int bpf_test_sockopt_flip(void *ctx, struct sock *sk,
+ const struct sockopt_test *t,
+ int level)
+{
+	int old, tmp, new, opt = t->opt;
+
+ if (opt == SO_TXREHASH && !sk_is_tcp(sk))
+ return 0;
+
+ if (bpf_getsockopt(ctx, level, opt, &old, sizeof(old)))
+ return 1;
+	/* The kernel initializes txrehash to 255 by default */
+ if (level == SOL_SOCKET && opt == SO_TXREHASH && old != 0 && old != 1)
+ old = 1;
+
+ new = !old;
+ if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new)))
+ return 1;
+ if (bpf_getsockopt(ctx, level, opt, &tmp, sizeof(tmp)) ||
+ tmp != new)
+ return 1;
+
+ if (bpf_setsockopt(ctx, level, opt, &old, sizeof(old)))
+ return 1;
+
+ return 0;
+}
+
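+/* For integer options: write t->new, verify the readback matches the
+ * expected value (stream sockets use t->tcp_expected when it is set), then
+ * restore the old value (or t->restore when set). Returns 0 on success.
+ */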
+static int bpf_test_sockopt_int(void *ctx, struct sock *sk,
+ const struct sockopt_test *t,
+ int level)
+{
+ int old, tmp, new, expected, opt;
+
+ opt = t->opt;
+ new = t->new;
+ if (sk->sk_type == SOCK_STREAM && t->tcp_expected)
+ expected = t->tcp_expected;
+ else
+ expected = t->expected;
+
+ if (bpf_getsockopt(ctx, level, opt, &old, sizeof(old)) ||
+ old == new)
+ return 1;
+
+ if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new)))
+ return 1;
+ if (bpf_getsockopt(ctx, level, opt, &tmp, sizeof(tmp)) ||
+ tmp != expected)
+ return 1;
+
+ if (t->restore)
+ old = t->restore;
+ if (bpf_setsockopt(ctx, level, opt, &old, sizeof(old)))
+ return 1;
+
+ return 0;
+}
+
+static int bpf_test_socket_sockopt(__u32 i, struct loop_ctx *lc)
+{
+ const struct sockopt_test *t;
+
+ if (i >= ARRAY_SIZE(sol_socket_tests))
+ return 1;
+
+ t = &sol_socket_tests[i];
+ if (!t->opt)
+ return 1;
+
+ if (t->flip)
+ return bpf_test_sockopt_flip(lc->ctx, lc->sk, t, SOL_SOCKET);
+
+ return bpf_test_sockopt_int(lc->ctx, lc->sk, t, SOL_SOCKET);
+}
+
+static int bpf_test_ip_sockopt(__u32 i, struct loop_ctx *lc)
+{
+ const struct sockopt_test *t;
+
+ if (i >= ARRAY_SIZE(sol_ip_tests))
+ return 1;
+
+ t = &sol_ip_tests[i];
+ if (!t->opt)
+ return 1;
+
+ if (t->flip)
+ return bpf_test_sockopt_flip(lc->ctx, lc->sk, t, IPPROTO_IP);
+
+ return bpf_test_sockopt_int(lc->ctx, lc->sk, t, IPPROTO_IP);
+}
+
+static int bpf_test_ipv6_sockopt(__u32 i, struct loop_ctx *lc)
+{
+ const struct sockopt_test *t;
+
+ if (i >= ARRAY_SIZE(sol_ipv6_tests))
+ return 1;
+
+ t = &sol_ipv6_tests[i];
+ if (!t->opt)
+ return 1;
+
+ if (t->flip)
+ return bpf_test_sockopt_flip(lc->ctx, lc->sk, t, IPPROTO_IPV6);
+
+ return bpf_test_sockopt_int(lc->ctx, lc->sk, t, IPPROTO_IPV6);
+}
+
+static int bpf_test_tcp_sockopt(__u32 i, struct loop_ctx *lc)
+{
+ const struct sockopt_test *t;
+ struct sock *sk;
+ void *ctx;
+
+ if (i >= ARRAY_SIZE(sol_tcp_tests))
+ return 1;
+
+ t = &sol_tcp_tests[i];
+ if (!t->opt)
+ return 1;
+
+ ctx = lc->ctx;
+ sk = lc->sk;
+
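+	/* TCP_CONGESTION is special-cased: setting a non-existent CC must
+	 * fail, then flip between cubic and reno, verify the readback, and
+	 * restore the original CC.
+	 */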
+ if (t->opt == TCP_CONGESTION) {
+ char old_cc[16], tmp_cc[16];
+ const char *new_cc;
+ int new_cc_len;
+
+ if (!bpf_setsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION,
+ (void *)not_exist_cc, sizeof(not_exist_cc)))
+ return 1;
+ if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, old_cc, sizeof(old_cc)))
+ return 1;
+ if (!bpf_strncmp(old_cc, sizeof(old_cc), cubic_cc)) {
+ new_cc = reno_cc;
+ new_cc_len = sizeof(reno_cc);
+ } else {
+ new_cc = cubic_cc;
+ new_cc_len = sizeof(cubic_cc);
+ }
+ if (bpf_setsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, (void *)new_cc,
+ new_cc_len))
+ return 1;
+ if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, tmp_cc, sizeof(tmp_cc)))
+ return 1;
+ if (bpf_strncmp(tmp_cc, sizeof(tmp_cc), new_cc))
+ return 1;
+ if (bpf_setsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, old_cc, sizeof(old_cc)))
+ return 1;
+ return 0;
+ }
+
+ if (t->flip)
+ return bpf_test_sockopt_flip(ctx, sk, t, IPPROTO_TCP);
+
+ return bpf_test_sockopt_int(ctx, sk, t, IPPROTO_TCP);
+}
+
+static int bpf_test_sockopt(void *ctx, struct sock *sk)
+{
+ struct loop_ctx lc = { .ctx = ctx, .sk = sk, };
+ __u16 family, proto;
+ int n;
+
+ family = sk->sk_family;
+ proto = sk->sk_protocol;
+
+ n = bpf_loop(ARRAY_SIZE(sol_socket_tests), bpf_test_socket_sockopt, &lc, 0);
+ if (n != ARRAY_SIZE(sol_socket_tests))
+ return -1;
+
+ if (proto == IPPROTO_TCP) {
+ n = bpf_loop(ARRAY_SIZE(sol_tcp_tests), bpf_test_tcp_sockopt, &lc, 0);
+ if (n != ARRAY_SIZE(sol_tcp_tests))
+ return -1;
+ }
+
+ if (family == AF_INET) {
+ n = bpf_loop(ARRAY_SIZE(sol_ip_tests), bpf_test_ip_sockopt, &lc, 0);
+ if (n != ARRAY_SIZE(sol_ip_tests))
+ return -1;
+ } else {
+ n = bpf_loop(ARRAY_SIZE(sol_ipv6_tests), bpf_test_ipv6_sockopt, &lc, 0);
+ if (n != ARRAY_SIZE(sol_ipv6_tests))
+ return -1;
+ }
+
+ return 0;
+}
+
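+/* Round-trip SO_BINDTODEVICE/SO_BINDTOIFINDEX: bind to the veth by name,
+ * unbind with an empty name, bind by ifindex, then unbind with index 0,
+ * checking SO_BINDTOIFINDEX after each step.
+ */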
+static int binddev_test(void *ctx)
+{
+ const char empty_ifname[] = "";
+ int ifindex, zero = 0;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
+ (void *)veth, sizeof(veth)))
+ return -1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &ifindex, sizeof(int)) ||
+ ifindex != veth_ifindex)
+ return -1;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
+ (void *)empty_ifname, sizeof(empty_ifname)))
+ return -1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &ifindex, sizeof(int)) ||
+ ifindex != 0)
+ return -1;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ (void *)&veth_ifindex, sizeof(int)))
+ return -1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &ifindex, sizeof(int)) ||
+ ifindex != veth_ifindex)
+ return -1;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &zero, sizeof(int)))
+ return -1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &ifindex, sizeof(int)) ||
+ ifindex != 0)
+ return -1;
+
+ return 0;
+}
+
+static int test_tcp_maxseg(void *ctx, struct sock *sk)
+{
+ int val = 1314, tmp;
+
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return bpf_setsockopt(ctx, IPPROTO_TCP, TCP_MAXSEG,
+ &val, sizeof(val));
+
+ if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_MAXSEG, &tmp, sizeof(tmp)) ||
+ tmp > val)
+ return -1;
+
+ return 0;
+}
+
+static int test_tcp_saved_syn(void *ctx, struct sock *sk)
+{
+ __u8 saved_syn[20];
+ int one = 1;
+
+ if (sk->sk_state == TCP_LISTEN)
+ return bpf_setsockopt(ctx, IPPROTO_TCP, TCP_SAVE_SYN,
+ &one, sizeof(one));
+
+ return bpf_getsockopt(ctx, IPPROTO_TCP, TCP_SAVED_SYN,
+ saved_syn, sizeof(saved_syn));
+}
+
+SEC("lsm_cgroup/socket_post_create")
+int BPF_PROG(socket_post_create, struct socket *sock, int family,
+ int type, int protocol, int kern)
+{
+ struct sock *sk = sock->sk;
+
+ if (!sk)
+ return 1;
+
+ nr_socket_post_create += !bpf_test_sockopt(sk, sk);
+ nr_binddev += !binddev_test(sk);
+
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int _getsockopt(struct bpf_sockopt *ctx)
+{
+ struct bpf_sock *sk = ctx->sk;
+ int *optval = ctx->optval;
+ struct tcp_sock *tp;
+
+ if (!sk || ctx->level != SOL_TCP || ctx->optname != TCP_BPF_SOCK_OPS_CB_FLAGS)
+ return 1;
+
+ tp = bpf_core_cast(sk, struct tcp_sock);
+ if (ctx->optval + sizeof(int) <= ctx->optval_end) {
+ *optval = tp->bpf_sock_ops_cb_flags;
+ ctx->retval = 0;
+ }
+ return 1;
+}
+
+SEC("sockops")
+int skops_sockopt(struct bpf_sock_ops *skops)
+{
+ struct bpf_sock *bpf_sk = skops->sk;
+ struct sock *sk;
+ int flags;
+
+ if (!bpf_sk)
+ return 1;
+
+ sk = (struct sock *)bpf_skc_to_tcp_sock(bpf_sk);
+ if (!sk)
+ return 1;
+
+ switch (skops->op) {
+ case BPF_SOCK_OPS_TCP_LISTEN_CB:
+ nr_listen += !(bpf_test_sockopt(skops, sk) ||
+ test_tcp_maxseg(skops, sk) ||
+ test_tcp_saved_syn(skops, sk));
+ break;
+ case BPF_SOCK_OPS_TCP_CONNECT_CB:
+ nr_connect += !(bpf_test_sockopt(skops, sk) ||
+ test_tcp_maxseg(skops, sk));
+ break;
+ case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
+ nr_active += !(bpf_test_sockopt(skops, sk) ||
+ test_tcp_maxseg(skops, sk));
+ break;
+ case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
+ nr_passive += !(bpf_test_sockopt(skops, sk) ||
+ test_tcp_maxseg(skops, sk) ||
+ test_tcp_saved_syn(skops, sk));
+ flags = skops->bpf_sock_ops_cb_flags | BPF_SOCK_OPS_STATE_CB_FLAG;
+ bpf_setsockopt(skops, SOL_TCP, TCP_BPF_SOCK_OPS_CB_FLAGS, &flags, sizeof(flags));
+ break;
+ case BPF_SOCK_OPS_STATE_CB:
+ if (skops->args[1] == BPF_TCP_CLOSE_WAIT)
+ nr_fin_wait1 += !bpf_test_sockopt(skops, sk);
+ break;
+ }
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c b/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c
new file mode 100644
index 000000000000..46d6eb2a3b17
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Facebook */
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+void *local_storage_ptr = NULL;
+void *sk_ptr = NULL;
+int cookie_found = 0;
+__u64 cookie = 0;
+__u32 omem = 0;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} sk_storage SEC(".maps");
+
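+/* inet6_sock_destruct() below records the socket and its local storage
+ * pointer; the fexit program on bpf_local_storage_destroy() then samples
+ * sk_omem_alloc, presumably so userspace can verify the storage charge was
+ * released.
+ */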
+SEC("fexit/bpf_local_storage_destroy")
+int BPF_PROG(bpf_local_storage_destroy, struct bpf_local_storage *local_storage)
+{
+ struct sock *sk;
+
+ if (local_storage_ptr != local_storage)
+ return 0;
+
+ sk = bpf_core_cast(sk_ptr, struct sock);
+ if (sk->sk_cookie.counter != cookie)
+ return 0;
+
+ cookie_found++;
+ omem = sk->sk_omem_alloc.counter;
+ local_storage_ptr = NULL;
+
+ return 0;
+}
+
+SEC("fentry/inet6_sock_destruct")
+int BPF_PROG(inet6_sock_destruct, struct sock *sk)
+{
+ int *value;
+
+ if (!cookie || sk->sk_cookie.counter != cookie)
+ return 0;
+
+ value = bpf_sk_storage_get(&sk_storage, sk, 0, 0);
+ if (value && *value == 0xdeadbeef) {
+ cookie_found++;
+ sk_ptr = sk;
+ local_storage_ptr = sk->sk_bpf_storage;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/skb_load_bytes.c b/tools/testing/selftests/bpf/progs/skb_load_bytes.c
new file mode 100644
index 000000000000..e4252fd973be
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/skb_load_bytes.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 load_offset = 0;
+int test_result = 0;
+
+SEC("tc")
+int skb_process(struct __sk_buff *skb)
+{
+ char buf[16];
+
+ test_result = bpf_skb_load_bytes(skb, load_offset, buf, 10);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/skb_pkt_end.c b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
new file mode 100644
index 000000000000..3bb4451524a1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef BPF_NO_PRESERVE_ACCESS_INDEX
+#define BPF_NO_PRESERVE_ACCESS_INDEX
+#endif
+#include <vmlinux.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+
+#define INLINE __always_inline
+
+#define skb_shorter(skb, len) ((void *)(long)(skb)->data + (len) > (void *)(long)(skb)->data_end)
+
+#define ETH_IPV4_TCP_SIZE (14 + sizeof(struct iphdr) + sizeof(struct tcphdr))
+
+static INLINE struct iphdr *get_iphdr(struct __sk_buff *skb)
+{
+ struct iphdr *ip = NULL;
+ struct ethhdr *eth;
+
+ if (skb_shorter(skb, ETH_IPV4_TCP_SIZE))
+ goto out;
+
+ eth = (void *)(long)skb->data;
+ ip = (void *)(eth + 1);
+
+out:
+ return ip;
+}
+
+SEC("tc")
+int main_prog(struct __sk_buff *skb)
+{
+ struct iphdr *ip = NULL;
+ struct tcphdr *tcp;
+ __u8 proto = 0;
+ int urg_ptr;
+ u32 offset;
+
+ if (!(ip = get_iphdr(skb)))
+ goto out;
+
+ proto = ip->protocol;
+
+ if (proto != IPPROTO_TCP)
+ goto out;
+
+ tcp = (void*)(ip + 1);
+ if (tcp->dest != 0)
+ goto out;
+ if (!tcp)
+ goto out;
+
+ urg_ptr = tcp->urg_ptr;
+
+ /* Checksum validation part */
+ proto++;
+ offset = sizeof(struct ethhdr) + offsetof(struct iphdr, protocol);
+ bpf_skb_store_bytes(skb, offset, &proto, sizeof(proto), BPF_F_RECOMPUTE_CSUM);
+
+ return urg_ptr;
+out:
+ return -1;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sock_addr_kern.c b/tools/testing/selftests/bpf/progs/sock_addr_kern.c
new file mode 100644
index 000000000000..84ad515eafd6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sock_addr_kern.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
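+/* Thin SEC("syscall") wrappers around the bpf_testmod kfuncs, so the
+ * userspace test can drive in-kernel socket operations (connect, bind,
+ * listen, sendmsg, getsockname, getpeername), presumably via
+ * bpf_prog_test_run.
+ */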
+SEC("syscall")
+int init_sock(struct init_sock_args *args)
+{
+ bpf_kfunc_init_sock(args);
+
+ return 0;
+}
+
+SEC("syscall")
+int close_sock(void *ctx)
+{
+ bpf_kfunc_close_sock();
+
+ return 0;
+}
+
+SEC("syscall")
+int kernel_connect(struct addr_args *args)
+{
+ return bpf_kfunc_call_kernel_connect(args);
+}
+
+SEC("syscall")
+int kernel_bind(struct addr_args *args)
+{
+ return bpf_kfunc_call_kernel_bind(args);
+}
+
+SEC("syscall")
+int kernel_listen(struct addr_args *args)
+{
+ return bpf_kfunc_call_kernel_listen();
+}
+
+SEC("syscall")
+int kernel_sendmsg(struct sendmsg_args *args)
+{
+ return bpf_kfunc_call_kernel_sendmsg(args);
+}
+
+SEC("syscall")
+int sock_sendmsg(struct sendmsg_args *args)
+{
+ return bpf_kfunc_call_sock_sendmsg(args);
+}
+
+SEC("syscall")
+int kernel_getsockname(struct addr_args *args)
+{
+ return bpf_kfunc_call_kernel_getsockname(args);
+}
+
+SEC("syscall")
+int kernel_getpeername(struct addr_args *args)
+{
+ return bpf_kfunc_call_kernel_getpeername(args);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sock_destroy_prog.c b/tools/testing/selftests/bpf/progs/sock_destroy_prog.c
new file mode 100644
index 000000000000..9e0bf7a54cec
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sock_destroy_prog.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include "bpf_tracing_net.h"
+
+__be16 serv_port = 0;
+
+int bpf_sock_destroy(struct sock_common *sk) __ksym;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} tcp_conn_sockets SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} udp_conn_sockets SEC(".maps");
+
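+/* Record the socket cookie of connecting TCP/UDP clients; the *_client
+ * iterator programs below match on these cookies to destroy the client
+ * sockets.
+ */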
+SEC("cgroup/connect6")
+int sock_connect(struct bpf_sock_addr *ctx)
+{
+ __u64 sock_cookie = 0;
+ int key = 0;
+ __u32 keyc = 0;
+
+ if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6)
+ return 1;
+
+ sock_cookie = bpf_get_socket_cookie(ctx);
+ if (ctx->protocol == IPPROTO_TCP)
+ bpf_map_update_elem(&tcp_conn_sockets, &key, &sock_cookie, 0);
+ else if (ctx->protocol == IPPROTO_UDP)
+ bpf_map_update_elem(&udp_conn_sockets, &keyc, &sock_cookie, 0);
+ else
+ return 1;
+
+ return 1;
+}
+
+SEC("iter/tcp")
+int iter_tcp6_client(struct bpf_iter__tcp *ctx)
+{
+ struct sock_common *sk_common = ctx->sk_common;
+ __u64 sock_cookie = 0;
+ __u64 *val;
+ int key = 0;
+
+ if (!sk_common)
+ return 0;
+
+ if (sk_common->skc_family != AF_INET6)
+ return 0;
+
+ sock_cookie = bpf_get_socket_cookie(sk_common);
+ val = bpf_map_lookup_elem(&tcp_conn_sockets, &key);
+ if (!val)
+ return 0;
+ /* Destroy connected client sockets. */
+ if (sock_cookie == *val)
+ bpf_sock_destroy(sk_common);
+
+ return 0;
+}
+
+SEC("iter/tcp")
+int iter_tcp6_server(struct bpf_iter__tcp *ctx)
+{
+ struct sock_common *sk_common = ctx->sk_common;
+ const struct inet_connection_sock *icsk;
+ const struct inet_sock *inet;
+ struct tcp6_sock *tcp_sk;
+ __be16 srcp;
+
+ if (!sk_common)
+ return 0;
+
+ if (sk_common->skc_family != AF_INET6)
+ return 0;
+
+ tcp_sk = bpf_skc_to_tcp6_sock(sk_common);
+ if (!tcp_sk)
+ return 0;
+
+ icsk = &tcp_sk->tcp.inet_conn;
+ inet = &icsk->icsk_inet;
+ srcp = inet->inet_sport;
+
+ /* Destroy server sockets. */
+ if (srcp == serv_port)
+ bpf_sock_destroy(sk_common);
+
+ return 0;
+}
+
+
+SEC("iter/udp")
+int iter_udp6_client(struct bpf_iter__udp *ctx)
+{
+ struct udp_sock *udp_sk = ctx->udp_sk;
+ struct sock *sk = (struct sock *) udp_sk;
+ __u64 sock_cookie = 0, *val;
+ int key = 0;
+
+ if (!sk)
+ return 0;
+
+ sock_cookie = bpf_get_socket_cookie(sk);
+ val = bpf_map_lookup_elem(&udp_conn_sockets, &key);
+ if (!val)
+ return 0;
+ /* Destroy connected client sockets. */
+ if (sock_cookie == *val)
+ bpf_sock_destroy((struct sock_common *)sk);
+
+ return 0;
+}
+
+SEC("iter/udp")
+int iter_udp6_server(struct bpf_iter__udp *ctx)
+{
+ struct udp_sock *udp_sk = ctx->udp_sk;
+ struct sock *sk = (struct sock *) udp_sk;
+ struct inet_sock *inet;
+ __be16 srcp;
+
+ if (!sk)
+ return 0;
+
+ inet = &udp_sk->inet;
+ srcp = inet->inet_sport;
+ if (srcp == serv_port)
+ bpf_sock_destroy((struct sock_common *)sk);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c b/tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c
new file mode 100644
index 000000000000..dd6850b58e25
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+int bpf_sock_destroy(struct sock_common *sk) __ksym;
+
+SEC("tp_btf/tcp_destroy_sock")
+__failure __msg("calling kernel function bpf_sock_destroy is not allowed")
+int BPF_PROG(trace_tcp_destroy_sock, struct sock *sk)
+{
+ /* should not load */
+ bpf_sock_destroy((struct sock_common *)sk);
+
+ return 0;
+}
+
diff --git a/tools/testing/selftests/bpf/progs/sock_iter_batch.c b/tools/testing/selftests/bpf/progs/sock_iter_batch.c
new file mode 100644
index 000000000000..77966ded5467
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sock_iter_batch.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Meta
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_tracing_net.h"
+#include "bpf_kfuncs.h"
+
+#define ATTR __always_inline
+#include "test_jhash.h"
+
+static bool ipv6_addr_loopback(const struct in6_addr *a)
+{
+ return (a->s6_addr32[0] | a->s6_addr32[1] |
+ a->s6_addr32[2] | (a->s6_addr32[3] ^ bpf_htonl(1))) == 0;
+}
+
+static bool ipv4_addr_loopback(__be32 a)
+{
+ return a == bpf_ntohl(0x7f000001);
+}
+
+volatile const unsigned int sf;
+volatile const unsigned int ss;
+volatile const __u16 ports[2];
+unsigned int bucket[2];
+
+SEC("iter/tcp")
+int iter_tcp_soreuse(struct bpf_iter__tcp *ctx)
+{
+ struct sock *sk = (struct sock *)ctx->sk_common;
+ struct inet_hashinfo *hinfo;
+ unsigned int hash;
+ __u64 sock_cookie;
+ struct net *net;
+ int idx;
+
+ if (!sk)
+ return 0;
+
+ sock_cookie = bpf_get_socket_cookie(sk);
+ sk = bpf_core_cast(sk, struct sock);
+ if (sk->sk_family != sf ||
+ (ss && sk->sk_state != ss) ||
+ (sk->sk_family == AF_INET6 ?
+ !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) :
+ !ipv4_addr_loopback(sk->sk_rcv_saddr)))
+ return 0;
+
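+	/* Attribute the socket to one of the two configured ports; with no
+	 * ports configured, everything falls into slot 0.
+	 */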
+ if (sk->sk_num == ports[0])
+ idx = 0;
+ else if (sk->sk_num == ports[1])
+ idx = 1;
+ else if (!ports[0] && !ports[1])
+ idx = 0;
+ else
+ return 0;
+
+ /* bucket selection as in inet_lhash2_bucket_sk() */
+ net = sk->sk_net.net;
+ hash = jhash2(sk->sk_v6_rcv_saddr.s6_addr32, 4, net->hash_mix);
+ hash ^= sk->sk_num;
+ hinfo = net->ipv4.tcp_death_row.hashinfo;
+ bucket[idx] = hash & hinfo->lhash2_mask;
+ bpf_seq_write(ctx->meta->seq, &idx, sizeof(idx));
+ bpf_seq_write(ctx->meta->seq, &sock_cookie, sizeof(sock_cookie));
+
+ return 0;
+}
+
+volatile const __u64 destroy_cookie;
+
+SEC("iter/tcp")
+int iter_tcp_destroy(struct bpf_iter__tcp *ctx)
+{
+ struct sock_common *sk_common = (struct sock_common *)ctx->sk_common;
+ __u64 sock_cookie;
+
+ if (!sk_common)
+ return 0;
+
+ sock_cookie = bpf_get_socket_cookie(sk_common);
+ if (sock_cookie != destroy_cookie)
+ return 0;
+
+ bpf_sock_destroy(sk_common);
+ bpf_seq_write(ctx->meta->seq, &sock_cookie, sizeof(sock_cookie));
+
+ return 0;
+}
+
+#define udp_sk(ptr) container_of(ptr, struct udp_sock, inet.sk)
+
+SEC("iter/udp")
+int iter_udp_soreuse(struct bpf_iter__udp *ctx)
+{
+ struct sock *sk = (struct sock *)ctx->udp_sk;
+ struct udp_table *udptable;
+ __u64 sock_cookie;
+ int idx;
+
+ if (!sk)
+ return 0;
+
+ sock_cookie = bpf_get_socket_cookie(sk);
+ sk = bpf_core_cast(sk, struct sock);
+ if (sk->sk_family != sf ||
+ (sk->sk_family == AF_INET6 ?
+ !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) :
+ !ipv4_addr_loopback(sk->sk_rcv_saddr)))
+ return 0;
+
+ if (sk->sk_num == ports[0])
+ idx = 0;
+ else if (sk->sk_num == ports[1])
+ idx = 1;
+ else if (!ports[0] && !ports[1])
+ idx = 0;
+ else
+ return 0;
+
+ /* bucket selection as in udp_hashslot2() */
+ udptable = sk->sk_net.net->ipv4.udp_table;
+ bucket[idx] = udp_sk(sk)->udp_portaddr_hash & udptable->mask;
+ bpf_seq_write(ctx->meta->seq, &idx, sizeof(idx));
+ bpf_seq_write(ctx->meta->seq, &sock_cookie, sizeof(sock_cookie));
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/socket_cookie_prog.c b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
index 0cb5656a22b0..35630a5aaf5f 100644
--- a/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
+++ b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
-#include <linux/bpf.h>
-#include <sys/socket.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include <bpf/bpf_tracing.h>
+
+#define AF_INET6 10
struct socket_cookie {
__u64 cookie_key;
@@ -19,6 +21,14 @@ struct {
__type(value, struct socket_cookie);
} socket_cookies SEC(".maps");
+/*
+ * These three programs get executed in a row on connect() syscalls. The
+ * userspace side of the test creates a client socket, issues a connect() on it
+ * and then checks that the local storage associated with this socket has:
+ * cookie_value == local_port << 8 | 0xFF
+ * The different parts of this cookie_value are OR-ed in by these hooks if they
+ * all agree on the output of bpf_get_socket_cookie().
+ */
SEC("cgroup/connect6")
int set_cookie(struct bpf_sock_addr *ctx)
{
@@ -32,16 +42,16 @@ int set_cookie(struct bpf_sock_addr *ctx)
if (!p)
return 1;
- p->cookie_value = 0xFF;
+ p->cookie_value = 0xF;
p->cookie_key = bpf_get_socket_cookie(ctx);
return 1;
}
SEC("sockops")
-int update_cookie(struct bpf_sock_ops *ctx)
+int update_cookie_sockops(struct bpf_sock_ops *ctx)
{
- struct bpf_sock *sk;
+ struct bpf_sock *sk = ctx->sk;
struct socket_cookie *p;
if (ctx->family != AF_INET6)
@@ -50,21 +60,40 @@ int update_cookie(struct bpf_sock_ops *ctx)
if (ctx->op != BPF_SOCK_OPS_TCP_CONNECT_CB)
return 1;
- if (!ctx->sk)
+ if (!sk)
return 1;
- p = bpf_sk_storage_get(&socket_cookies, ctx->sk, 0, 0);
+ p = bpf_sk_storage_get(&socket_cookies, sk, 0, 0);
if (!p)
return 1;
if (p->cookie_key != bpf_get_socket_cookie(ctx))
return 1;
- p->cookie_value = (ctx->local_port << 8) | p->cookie_value;
+ p->cookie_value |= (ctx->local_port << 8);
return 1;
}
-int _version SEC("version") = 1;
+SEC("fexit/inet_stream_connect")
+int BPF_PROG(update_cookie_tracing, struct socket *sock,
+ struct sockaddr *uaddr, int addr_len, int flags)
+{
+ struct socket_cookie *p;
+
+ if (uaddr->sa_family != AF_INET6)
+ return 0;
+
+ p = bpf_sk_storage_get(&socket_cookies, sock->sk, 0, 0);
+ if (!p)
+ return 0;
+
+ if (p->cookie_key != bpf_get_socket_cookie(sock->sk))
+ return 0;
+
+ p->cookie_value |= 0xF0;
+
+ return 0;
+}
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
index ca283af80d4e..c9abfe3a11af 100644
--- a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
@@ -2,15 +2,11 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-int _version SEC("version") = 1;
-
SEC("sk_skb1")
int bpf_prog1(struct __sk_buff *skb)
{
void *data_end = (void *)(long) skb->data_end;
void *data = (void *)(long) skb->data;
- __u32 lport = skb->local_port;
- __u32 rport = skb->remote_port;
__u8 *d = data;
int err;
diff --git a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
index fdb4bf4408fa..80632954c5a1 100644
--- a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
@@ -3,23 +3,9 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-int _version SEC("version") = 1;
-
SEC("sk_msg1")
int bpf_prog1(struct sk_msg_md *msg)
{
- void *data_end = (void *)(long) msg->data_end;
- void *data = (void *)(long) msg->data;
-
- char *d;
-
- if (data + 8 > data_end)
- return SK_DROP;
-
- bpf_printk("data length %i\n", (__u64)msg->data_end - (__u64)msg->data);
- d = (char *)data;
- bpf_printk("hello sendmsg hook %i %i\n", d[0], d[1]);
-
return SK_PASS;
}
diff --git a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
index 4797dc985064..0660f29dca95 100644
--- a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
@@ -1,28 +1,27 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-
-int _version SEC("version") = 1;
+#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} sock_map_rx SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} sock_map_tx SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} sock_map_msg SEC(".maps");
struct {
@@ -42,6 +41,9 @@ int bpf_prog2(struct __sk_buff *skb)
__u8 *d = data;
__u8 sk, map;
+ __sink(lport);
+ __sink(rport);
+
if (data + 8 > data_end)
return SK_DROP;
diff --git a/tools/testing/selftests/bpf/progs/sockopt_inherit.c b/tools/testing/selftests/bpf/progs/sockopt_inherit.c
index c6d428a8d785..a3434b840928 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_inherit.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_inherit.c
@@ -3,13 +3,14 @@
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1;
#define SOL_CUSTOM 0xdeadbeef
#define CUSTOM_INHERIT1 0
#define CUSTOM_INHERIT2 1
#define CUSTOM_LISTENER 2
+__s32 page_size = 0;
+
struct sockopt_inherit {
__u8 val;
};
@@ -56,7 +57,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
__u8 *optval = ctx->optval;
if (ctx->level != SOL_CUSTOM)
- return 1; /* only interested in SOL_CUSTOM */
+ goto out; /* only interested in SOL_CUSTOM */
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -71,6 +72,12 @@ int _getsockopt(struct bpf_sockopt *ctx)
ctx->optlen = 1;
return 1;
+
+out:
+	/* An optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
SEC("cgroup/setsockopt")
@@ -81,7 +88,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
__u8 *optval = ctx->optval;
if (ctx->level != SOL_CUSTOM)
- return 1; /* only interested in SOL_CUSTOM */
+ goto out; /* only interested in SOL_CUSTOM */
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -94,4 +101,10 @@ int _setsockopt(struct bpf_sockopt *ctx)
ctx->optlen = -1;
return 1;
+
+out:
+	/* An optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/sockopt_multi.c b/tools/testing/selftests/bpf/progs/sockopt_multi.c
index 9d8c212dde9f..db67278e12d4 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_multi.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_multi.c
@@ -4,16 +4,17 @@
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1;
-SEC("cgroup/getsockopt/child")
+__s32 page_size = 0;
+
+SEC("cgroup/getsockopt")
int _getsockopt_child(struct bpf_sockopt *ctx)
{
__u8 *optval_end = ctx->optval_end;
__u8 *optval = ctx->optval;
if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
- return 1;
+ goto out;
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -27,16 +28,22 @@ int _getsockopt_child(struct bpf_sockopt *ctx)
ctx->optlen = 1;
return 1;
+
+out:
+	/* An optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
-SEC("cgroup/getsockopt/parent")
+SEC("cgroup/getsockopt")
int _getsockopt_parent(struct bpf_sockopt *ctx)
{
__u8 *optval_end = ctx->optval_end;
__u8 *optval = ctx->optval;
if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
- return 1;
+ goto out;
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -50,6 +57,12 @@ int _getsockopt_parent(struct bpf_sockopt *ctx)
ctx->optlen = 1;
return 1;
+
+out:
+	/* An optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
SEC("cgroup/setsockopt")
@@ -59,7 +72,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
__u8 *optval = ctx->optval;
if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
- return 1;
+ goto out;
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -68,4 +81,10 @@ int _setsockopt(struct bpf_sockopt *ctx)
ctx->optlen = 1;
return 1;
+
+out:
+	/* An optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c b/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c
new file mode 100644
index 000000000000..5c3614333b01
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "bpf_tracing_net.h"
+
+char _license[] SEC("license") = "GPL";
+
+__s32 page_size = 0;
+
+const char cc_reno[TCP_CA_NAME_MAX] = "reno";
+const char cc_cubic[TCP_CA_NAME_MAX] = "cubic";
+
+SEC("cgroup/setsockopt")
+int sockopt_qos_to_cc(struct bpf_sockopt *ctx)
+{
+ void *optval_end = ctx->optval_end;
+ int *optval = ctx->optval;
+ char buf[TCP_CA_NAME_MAX];
+
+ if (ctx->level != SOL_IPV6 || ctx->optname != IPV6_TCLASS)
+ goto out;
+
+ if (optval + 1 > optval_end)
+ return 0; /* EPERM, bounds check */
+
+ if (bpf_getsockopt(ctx->sk, SOL_TCP, TCP_CONGESTION, &buf, sizeof(buf)))
+ return 0;
+
+ if (bpf_strncmp(buf, sizeof(buf), cc_cubic))
+ return 0;
+
+ if (*optval == 0x2d) {
+ if (bpf_setsockopt(ctx->sk, SOL_TCP, TCP_CONGESTION, (void *)&cc_reno,
+ sizeof(cc_reno)))
+ return 0;
+ }
+ return 1;
+
+out:
+	/* An optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c
index 712df7b49cb1..cb990a7d3d45 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_sk.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c
@@ -1,15 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
+#include <linux/tcp.h>
#include <linux/bpf.h>
+#include <netinet/in.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1;
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
+int page_size = 0; /* userspace should set it */
+
+#ifndef SOL_TCP
+#define SOL_TCP IPPROTO_TCP
#endif
#define SOL_CUSTOM 0xdeadbeef
@@ -31,14 +32,27 @@ int _getsockopt(struct bpf_sockopt *ctx)
__u8 *optval_end = ctx->optval_end;
__u8 *optval = ctx->optval;
struct sockopt_sk *storage;
+ struct bpf_sock *sk;
+
+ /* Bypass AF_NETLINK. */
+ sk = ctx->sk;
+ if (sk && sk->family == AF_NETLINK)
+ goto out;
+
+ /* Make sure bpf_get_netns_cookie is callable.
+ */
+ if (bpf_get_netns_cookie(NULL) == 0)
+ return 0;
+
+ if (bpf_get_netns_cookie(ctx) == 0)
+ return 0;
if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
/* Not interested in SOL_IP:IP_TOS;
* let next BPF program in the cgroup chain or kernel
* handle it.
*/
- ctx->optlen = 0; /* bypass optval>PAGE_SIZE */
- return 1;
+ goto out;
}
if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) {
@@ -46,7 +60,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
* let next BPF program in the cgroup chain or kernel
* handle it.
*/
- return 1;
+ goto out;
}
if (ctx->level == SOL_TCP && ctx->optname == TCP_CONGESTION) {
@@ -54,12 +68,28 @@ int _getsockopt(struct bpf_sockopt *ctx)
* let next BPF program in the cgroup chain or kernel
* handle it.
*/
- return 1;
+ goto out;
+ }
+
+ if (ctx->level == SOL_TCP && ctx->optname == TCP_ZEROCOPY_RECEIVE) {
+ /* Verify that TCP_ZEROCOPY_RECEIVE triggers.
+ * It has a custom implementation for performance
+ * reasons.
+ */
+
+ /* Check that optval contains address (__u64) */
+ if (optval + sizeof(__u64) > optval_end)
+ return 0; /* bounds check */
+
+ if (((struct tcp_zerocopy_receive *)optval)->address != 0)
+ return 0; /* unexpected data */
+
+ goto out;
}
if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
if (optval + 1 > optval_end)
- return 0; /* EPERM, bounds check */
+ return 0; /* bounds check */
ctx->retval = 0; /* Reset system call return value to zero */
@@ -71,25 +101,25 @@ int _getsockopt(struct bpf_sockopt *ctx)
* program can only see the first PAGE_SIZE
* bytes of data.
*/
- if (optval_end - optval != PAGE_SIZE)
- return 0; /* EPERM, unexpected data size */
+ if (optval_end - optval != page_size)
+ return 0; /* unexpected data size */
return 1;
}
if (ctx->level != SOL_CUSTOM)
- return 0; /* EPERM, deny everything except custom level */
+ return 0; /* deny everything except custom level */
if (optval + 1 > optval_end)
- return 0; /* EPERM, bounds check */
+ return 0; /* bounds check */
storage = bpf_sk_storage_get(&socket_storage_map, ctx->sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!storage)
- return 0; /* EPERM, couldn't get sk storage */
+ return 0; /* couldn't get sk storage */
if (!ctx->retval)
- return 0; /* EPERM, kernel should not have handled
+ return 0; /* kernel should not have handled
* SOL_CUSTOM, something is wrong!
*/
ctx->retval = 0; /* Reset system call return value to zero */
@@ -98,6 +128,12 @@ int _getsockopt(struct bpf_sockopt *ctx)
ctx->optlen = 1;
return 1;
+
+out:
+	/* For optval larger than PAGE_SIZE, use the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
SEC("cgroup/setsockopt")
@@ -106,6 +142,20 @@ int _setsockopt(struct bpf_sockopt *ctx)
__u8 *optval_end = ctx->optval_end;
__u8 *optval = ctx->optval;
struct sockopt_sk *storage;
+ struct bpf_sock *sk;
+
+ /* Bypass AF_NETLINK. */
+ sk = ctx->sk;
+ if (sk && sk->family == AF_NETLINK)
+ goto out;
+
+ /* Make sure bpf_get_netns_cookie is callable.
+ */
+ if (bpf_get_netns_cookie(NULL) == 0)
+ return 0;
+
+ if (bpf_get_netns_cookie(ctx) == 0)
+ return 0;
if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
/* Not interested in SOL_IP:IP_TOS;
@@ -120,7 +170,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
/* Overwrite SO_SNDBUF value */
if (optval + sizeof(__u32) > optval_end)
- return 0; /* EPERM, bounds check */
+ return 0; /* bounds check */
*(__u32 *)optval = 0x55AA;
ctx->optlen = 4;
@@ -132,7 +182,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
/* Always use cubic */
if (optval + 5 > optval_end)
- return 0; /* EPERM, bounds check */
+ return 0; /* bounds check */
memcpy(optval, "cubic", 5);
ctx->optlen = 5;
@@ -142,11 +192,11 @@ int _setsockopt(struct bpf_sockopt *ctx)
if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
/* Original optlen is larger than PAGE_SIZE. */
- if (ctx->optlen != PAGE_SIZE * 2)
- return 0; /* EPERM, unexpected data size */
+ if (ctx->optlen != page_size * 2)
+ return 0; /* unexpected data size */
if (optval + 1 > optval_end)
- return 0; /* EPERM, bounds check */
+ return 0; /* bounds check */
/* Make sure we can trim the buffer. */
optval[0] = 0;
@@ -156,22 +206,22 @@ int _setsockopt(struct bpf_sockopt *ctx)
* program can only see the first PAGE_SIZE
* bytes of data.
*/
- if (optval_end - optval != PAGE_SIZE)
- return 0; /* EPERM, unexpected data size */
+ if (optval_end - optval != page_size)
+ return 0; /* unexpected data size */
return 1;
}
if (ctx->level != SOL_CUSTOM)
- return 0; /* EPERM, deny everything except custom level */
+ return 0; /* deny everything except custom level */
if (optval + 1 > optval_end)
- return 0; /* EPERM, bounds check */
+ return 0; /* bounds check */
storage = bpf_sk_storage_get(&socket_storage_map, ctx->sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!storage)
- return 0; /* EPERM, couldn't get sk storage */
+ return 0; /* couldn't get sk storage */
storage->val = optval[0];
ctx->optlen = -1; /* BPF has consumed this option, don't call kernel
@@ -179,4 +229,10 @@ int _setsockopt(struct bpf_sockopt *ctx)
*/
return 1;
+
+out:
+	/* For optval larger than PAGE_SIZE, use the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
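
The new TCP_ZEROCOPY_RECEIVE branch in _getsockopt only checks that the kernel's custom getsockopt path is still routed through the program and that the zeroed address field is visible to it. A rough sketch of the userspace call that drives it, assuming fd is an established TCP socket in the attached cgroup:

/* Sketch: issue the TCP_ZEROCOPY_RECEIVE getsockopt the program inspects;
 * illustrative only, error handling trimmed.
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

static int query_zerocopy_receive(int fd)
{
	struct tcp_zerocopy_receive zc;
	socklen_t len = sizeof(zc);

	memset(&zc, 0, sizeof(zc));	/* address stays 0, as the program expects */
	return getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &len);
}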
diff --git a/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c b/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c
new file mode 100644
index 000000000000..2eb297df3dd6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+#define TEST_STACK_DEPTH 2
+#define TEST_MAX_ENTRIES 16384
+
+typedef __u64 stack_trace_t[TEST_STACK_DEPTH];
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(max_entries, TEST_MAX_ENTRIES);
+ __type(key, __u32);
+ __type(value, stack_trace_t);
+} stackmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, TEST_MAX_ENTRIES);
+ __type(key, __u32);
+ __type(value, __u32);
+} stackid_hmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, TEST_MAX_ENTRIES);
+ __type(key, __u32);
+ __type(value, stack_trace_t);
+} stack_amap SEC(".maps");
+
+int pid = 0;
+int control = 0;
+int failed = 0;
+
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct trace_event_raw_sched_switch *ctx)
+{
+ __u32 max_len = TEST_STACK_DEPTH * sizeof(__u64);
+ __u32 key = 0, val = 0;
+ __u64 *stack_p;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ if (control)
+ return 0;
+
+	/* it should be allowed to skip a whole buffer's worth of entries */
+ key = bpf_get_stackid(ctx, &stackmap, TEST_STACK_DEPTH);
+ if ((int)key >= 0) {
+ /* The size of stackmap and stack_amap should be the same */
+ bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
+ stack_p = bpf_map_lookup_elem(&stack_amap, &key);
+ if (stack_p) {
+ bpf_get_stack(ctx, stack_p, max_len, TEST_STACK_DEPTH);
+			/* the skip wrongly dropped all entries and filled the buffer with zeros */
+ if (stack_p[0] == 0)
+ failed = 1;
+ }
+ } else {
+ /* old kernel doesn't support skipping that many entries */
+ failed = 2;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
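
A hedged sketch of the userspace side this program implies: set pid to the test's own PID, attach, let at least one sched_switch fire, then read failed (0 means skipping TEST_STACK_DEPTH entries was honored). The skeleton header and struct names below are assumptions derived from the file name.

/* Sketch: drive the sched_switch program above via a libbpf skeleton;
 * illustrative only.
 */
#include <unistd.h>
#include "stacktrace_map_skip.skel.h"

static int run_stack_map_skip(void)
{
	struct stacktrace_map_skip *skel;
	int failed = -1;

	skel = stacktrace_map_skip__open_and_load();
	if (!skel)
		return -1;

	skel->bss->pid = getpid();	/* only sample our own task */

	if (stacktrace_map_skip__attach(skel))
		goto out;

	usleep(100 * 1000);		/* give sched_switch a chance to fire */

	skel->bss->control = 1;		/* stop collecting before checking */
	failed = skel->bss->failed;	/* 0: skip count honored, stacks non-zero */
out:
	stacktrace_map_skip__destroy(skel);
	return failed;
}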
diff --git a/tools/testing/selftests/bpf/progs/stream.c b/tools/testing/selftests/bpf/progs/stream.c
new file mode 100644
index 000000000000..35790897dc87
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/stream.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct arr_elem {
+ struct bpf_res_spin_lock lock;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct arr_elem);
+} arrmap SEC(".maps");
+
+#define ENOSPC 28
+#define _STR "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+
+int size;
+
+SEC("syscall")
+__success __retval(0)
+int stream_exhaust(void *ctx)
+{
+ /* Use global variable for loop convergence. */
+ size = 0;
+ bpf_repeat(BPF_MAX_LOOPS) {
+ if (bpf_stream_printk(BPF_STDOUT, _STR) == -ENOSPC && size == 99954)
+ return 0;
+ size += sizeof(_STR) - 1;
+ }
+ return 1;
+}
+
+SEC("syscall")
+__success __retval(0)
+int stream_cond_break(void *ctx)
+{
+ while (can_loop)
+ ;
+ return 0;
+}
+
+SEC("syscall")
+__success __retval(0)
+int stream_deadlock(void *ctx)
+{
+ struct bpf_res_spin_lock *lock, *nlock;
+
+ lock = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!lock)
+ return 1;
+ nlock = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!nlock)
+ return 1;
+ if (bpf_res_spin_lock(lock))
+ return 1;
+ if (bpf_res_spin_lock(nlock)) {
+ bpf_res_spin_unlock(lock);
+ return 0;
+ }
+ bpf_res_spin_unlock(nlock);
+ bpf_res_spin_unlock(lock);
+ return 1;
+}
+
+SEC("syscall")
+__success __retval(0)
+int stream_syscall(void *ctx)
+{
+ bpf_stream_printk(BPF_STDOUT, "foo");
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/stream_fail.c b/tools/testing/selftests/bpf/progs/stream_fail.c
new file mode 100644
index 000000000000..b4a0d0cc8ec8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/stream_fail.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+SEC("syscall")
+__failure __msg("Possibly NULL pointer passed")
+int stream_vprintk_null_arg(void *ctx)
+{
+ bpf_stream_vprintk(BPF_STDOUT, "", NULL, 0, NULL);
+ return 0;
+}
+
+SEC("syscall")
+__failure __msg("R3 type=scalar expected=")
+int stream_vprintk_scalar_arg(void *ctx)
+{
+ bpf_stream_vprintk(BPF_STDOUT, "", (void *)46, 0, NULL);
+ return 0;
+}
+
+SEC("syscall")
+__failure __msg("arg#1 doesn't point to a const string")
+int stream_vprintk_string_arg(void *ctx)
+{
+ bpf_stream_vprintk(BPF_STDOUT, ctx, NULL, 0, NULL);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c b/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c
new file mode 100644
index 000000000000..53af438bd998
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Red Hat, Inc.*/
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <linux/limits.h>
+#include "bpf_misc.h"
+#include "errno.h"
+
+char *user_ptr = (char *)1;
+char *invalid_kern_ptr = (char *)-1;
+
+/*
+ * When passing userspace pointers, the error code differs based on arch:
+ * -ERANGE on arches with non-overlapping address spaces
+ * -EFAULT on other arches
+ */
+#if defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_loongarch) || \
+ defined(__TARGET_ARCH_powerpc) || defined(__TARGET_ARCH_x86)
+#define USER_PTR_ERR -ERANGE
+#else
+#define USER_PTR_ERR -EFAULT
+#endif
+
+/*
+ * On s390, __get_kernel_nofault (used in string kfuncs) returns 0 for NULL and
+ * user_ptr (instead of causing an exception) so the below two groups of tests
+ * are not applicable.
+ */
+#ifndef __TARGET_ARCH_s390
+
+/* Passing NULL to string kfuncs (treated as a userspace ptr) */
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_null1(void *ctx) { return bpf_strcmp(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strcmp_null2(void *ctx) { return bpf_strcmp("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strchr_null(void *ctx) { return bpf_strchr(NULL, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strchrnul_null(void *ctx) { return bpf_strchrnul(NULL, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strnchr_null(void *ctx) { return bpf_strnchr(NULL, 1, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strrchr_null(void *ctx) { return bpf_strrchr(NULL, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strlen_null(void *ctx) { return bpf_strlen(NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strnlen_null(void *ctx) { return bpf_strnlen(NULL, 1); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strspn_null1(void *ctx) { return bpf_strspn(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strspn_null2(void *ctx) { return bpf_strspn("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strcspn_null1(void *ctx) { return bpf_strcspn(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strcspn_null2(void *ctx) { return bpf_strcspn("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strstr_null1(void *ctx) { return bpf_strstr(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strstr_null2(void *ctx) { return bpf_strstr("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strnstr_null1(void *ctx) { return bpf_strnstr(NULL, "hello", 1); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strnstr_null2(void *ctx) { return bpf_strnstr("hello", NULL, 1); }
+
+/* Passing userspace ptr to string kfuncs */
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_user_ptr1(void *ctx) { return bpf_strcmp(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_user_ptr2(void *ctx) { return bpf_strcmp("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strchr_user_ptr(void *ctx) { return bpf_strchr(user_ptr, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strchrnul_user_ptr(void *ctx) { return bpf_strchrnul(user_ptr, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnchr_user_ptr(void *ctx) { return bpf_strnchr(user_ptr, 1, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strrchr_user_ptr(void *ctx) { return bpf_strrchr(user_ptr, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strlen_user_ptr(void *ctx) { return bpf_strlen(user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnlen_user_ptr(void *ctx) { return bpf_strnlen(user_ptr, 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_user_ptr1(void *ctx) { return bpf_strspn(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_user_ptr2(void *ctx) { return bpf_strspn("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_user_ptr1(void *ctx) { return bpf_strcspn(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_user_ptr2(void *ctx) { return bpf_strcspn("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_user_ptr1(void *ctx) { return bpf_strstr(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_user_ptr2(void *ctx) { return bpf_strstr("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnstr_user_ptr1(void *ctx) { return bpf_strnstr(user_ptr, "hello", 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnstr_user_ptr2(void *ctx) { return bpf_strnstr("hello", user_ptr, 1); }
+
+#endif /* __TARGET_ARCH_s390 */
+
+/* Passing invalid kernel ptr to string kfuncs should always return -EFAULT */
+SEC("syscall") __retval(-EFAULT) int test_strcmp_pagefault1(void *ctx) { return bpf_strcmp(invalid_kern_ptr, "hello"); }
+SEC("syscall") __retval(-EFAULT) int test_strcmp_pagefault2(void *ctx) { return bpf_strcmp("hello", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strchr_pagefault(void *ctx) { return bpf_strchr(invalid_kern_ptr, 'a'); }
+SEC("syscall") __retval(-EFAULT) int test_strchrnul_pagefault(void *ctx) { return bpf_strchrnul(invalid_kern_ptr, 'a'); }
+SEC("syscall") __retval(-EFAULT) int test_strnchr_pagefault(void *ctx) { return bpf_strnchr(invalid_kern_ptr, 1, 'a'); }
+SEC("syscall") __retval(-EFAULT) int test_strrchr_pagefault(void *ctx) { return bpf_strrchr(invalid_kern_ptr, 'a'); }
+SEC("syscall") __retval(-EFAULT) int test_strlen_pagefault(void *ctx) { return bpf_strlen(invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strnlen_pagefault(void *ctx) { return bpf_strnlen(invalid_kern_ptr, 1); }
+SEC("syscall") __retval(-EFAULT) int test_strspn_pagefault1(void *ctx) { return bpf_strspn(invalid_kern_ptr, "hello"); }
+SEC("syscall") __retval(-EFAULT) int test_strspn_pagefault2(void *ctx) { return bpf_strspn("hello", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strcspn_pagefault1(void *ctx) { return bpf_strcspn(invalid_kern_ptr, "hello"); }
+SEC("syscall") __retval(-EFAULT) int test_strcspn_pagefault2(void *ctx) { return bpf_strcspn("hello", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strstr_pagefault1(void *ctx) { return bpf_strstr(invalid_kern_ptr, "hello"); }
+SEC("syscall") __retval(-EFAULT) int test_strstr_pagefault2(void *ctx) { return bpf_strstr("hello", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strnstr_pagefault1(void *ctx) { return bpf_strnstr(invalid_kern_ptr, "hello", 1); }
+SEC("syscall") __retval(-EFAULT) int test_strnstr_pagefault2(void *ctx) { return bpf_strnstr("hello", invalid_kern_ptr, 1); }
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c b/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c
new file mode 100644
index 000000000000..89fb4669b0e9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Red Hat, Inc.*/
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <linux/limits.h>
+
+char long_str[XATTR_SIZE_MAX + 1];
+
+SEC("syscall") int test_strcmp_too_long(void *ctx) { return bpf_strcmp(long_str, long_str); }
+SEC("syscall") int test_strchr_too_long(void *ctx) { return bpf_strchr(long_str, 'b'); }
+SEC("syscall") int test_strchrnul_too_long(void *ctx) { return bpf_strchrnul(long_str, 'b'); }
+SEC("syscall") int test_strnchr_too_long(void *ctx) { return bpf_strnchr(long_str, sizeof(long_str), 'b'); }
+SEC("syscall") int test_strrchr_too_long(void *ctx) { return bpf_strrchr(long_str, 'b'); }
+SEC("syscall") int test_strlen_too_long(void *ctx) { return bpf_strlen(long_str); }
+SEC("syscall") int test_strnlen_too_long(void *ctx) { return bpf_strnlen(long_str, sizeof(long_str)); }
+SEC("syscall") int test_strspn_str_too_long(void *ctx) { return bpf_strspn(long_str, "a"); }
+SEC("syscall") int test_strspn_accept_too_long(void *ctx) { return bpf_strspn("b", long_str); }
+SEC("syscall") int test_strcspn_str_too_long(void *ctx) { return bpf_strcspn(long_str, "b"); }
+SEC("syscall") int test_strcspn_reject_too_long(void *ctx) { return bpf_strcspn("b", long_str); }
+SEC("syscall") int test_strstr_too_long(void *ctx) { return bpf_strstr(long_str, "hello"); }
+SEC("syscall") int test_strnstr_too_long(void *ctx) { return bpf_strnstr(long_str, "hello", sizeof(long_str)); }
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_success.c b/tools/testing/selftests/bpf/progs/string_kfuncs_success.c
new file mode 100644
index 000000000000..46697f381878
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/string_kfuncs_success.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Red Hat, Inc.*/
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "errno.h"
+
+char str[] = "hello world";
+
+#define __test(retval) SEC("syscall") __success __retval(retval)
+
+/* Functional tests */
+__test(0) int test_strcmp_eq(void *ctx) { return bpf_strcmp(str, "hello world"); }
+__test(1) int test_strcmp_neq(void *ctx) { return bpf_strcmp(str, "hello"); }
+__test(1) int test_strchr_found(void *ctx) { return bpf_strchr(str, 'e'); }
+__test(11) int test_strchr_null(void *ctx) { return bpf_strchr(str, '\0'); }
+__test(-ENOENT) int test_strchr_notfound(void *ctx) { return bpf_strchr(str, 'x'); }
+__test(1) int test_strchrnul_found(void *ctx) { return bpf_strchrnul(str, 'e'); }
+__test(11) int test_strchrnul_notfound(void *ctx) { return bpf_strchrnul(str, 'x'); }
+__test(1) int test_strnchr_found(void *ctx) { return bpf_strnchr(str, 5, 'e'); }
+__test(11) int test_strnchr_null(void *ctx) { return bpf_strnchr(str, 12, '\0'); }
+__test(-ENOENT) int test_strnchr_notfound(void *ctx) { return bpf_strnchr(str, 5, 'w'); }
+__test(9) int test_strrchr_found(void *ctx) { return bpf_strrchr(str, 'l'); }
+__test(11) int test_strrchr_null(void *ctx) { return bpf_strrchr(str, '\0'); }
+__test(-ENOENT) int test_strrchr_notfound(void *ctx) { return bpf_strrchr(str, 'x'); }
+__test(11) int test_strlen(void *ctx) { return bpf_strlen(str); }
+__test(11) int test_strnlen(void *ctx) { return bpf_strnlen(str, 12); }
+__test(5) int test_strspn(void *ctx) { return bpf_strspn(str, "ehlo"); }
+__test(2) int test_strcspn(void *ctx) { return bpf_strcspn(str, "lo"); }
+__test(6) int test_strstr_found(void *ctx) { return bpf_strstr(str, "world"); }
+__test(-ENOENT) int test_strstr_notfound(void *ctx) { return bpf_strstr(str, "hi"); }
+__test(0) int test_strstr_empty(void *ctx) { return bpf_strstr(str, ""); }
+__test(0) int test_strnstr_found(void *ctx) { return bpf_strnstr(str, "hello", 6); }
+__test(-ENOENT) int test_strnstr_notfound(void *ctx) { return bpf_strnstr(str, "hi", 10); }
+__test(0) int test_strnstr_empty(void *ctx) { return bpf_strnstr(str, "", 1); }
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/strncmp_bench.c b/tools/testing/selftests/bpf/progs/strncmp_bench.c
new file mode 100644
index 000000000000..f47bf88f8d2a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/strncmp_bench.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
+#include <linux/types.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define STRNCMP_STR_SZ 4096
+
+/* Will be updated by benchmark before program loading */
+const volatile unsigned int cmp_str_len = 1;
+const char target[STRNCMP_STR_SZ];
+
+long hits = 0;
+char str[STRNCMP_STR_SZ];
+
+char _license[] SEC("license") = "GPL";
+
+static __always_inline int local_strncmp(const char *s1, unsigned int sz,
+ const char *s2)
+{
+ int ret = 0;
+ unsigned int i;
+
+ for (i = 0; i < sz; i++) {
+ /* E.g. 0xff > 0x31 */
+ ret = (unsigned char)s1[i] - (unsigned char)s2[i];
+ if (ret || !s1[i])
+ break;
+ }
+
+ return ret;
+}
+
+SEC("tp/syscalls/sys_enter_getpgid")
+int strncmp_no_helper(void *ctx)
+{
+ const char *target_str = target;
+
+ barrier_var(target_str);
+ if (local_strncmp(str, cmp_str_len + 1, target_str) < 0)
+ __sync_add_and_fetch(&hits, 1);
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getpgid")
+int strncmp_helper(void *ctx)
+{
+ if (bpf_strncmp(str, cmp_str_len + 1, target) < 0)
+ __sync_add_and_fetch(&hits, 1);
+ return 0;
+}
+
diff --git a/tools/testing/selftests/bpf/progs/strncmp_test.c b/tools/testing/selftests/bpf/progs/strncmp_test.c
new file mode 100644
index 000000000000..769668feed48
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/strncmp_test.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
+#include <stdbool.h>
+#include <linux/types.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define STRNCMP_STR_SZ 8
+
+const char target[STRNCMP_STR_SZ] = "EEEEEEE";
+char str[STRNCMP_STR_SZ];
+int cmp_ret = 0;
+int target_pid = 0;
+
+const char no_str_target[STRNCMP_STR_SZ] = "12345678";
+char writable_target[STRNCMP_STR_SZ];
+unsigned int no_const_str_size = STRNCMP_STR_SZ;
+
+char _license[] SEC("license") = "GPL";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int do_strncmp(void *ctx)
+{
+ if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
+ return 0;
+
+ cmp_ret = bpf_strncmp(str, STRNCMP_STR_SZ, target);
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int strncmp_bad_not_const_str_size(void *ctx)
+{
+ /* The value of string size is not const, so will fail */
+ cmp_ret = bpf_strncmp(str, no_const_str_size, target);
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int strncmp_bad_writable_target(void *ctx)
+{
+ /* Compared target is not read-only, so will fail */
+ cmp_ret = bpf_strncmp(str, STRNCMP_STR_SZ, writable_target);
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int strncmp_bad_not_null_term_target(void *ctx)
+{
+ /* Compared target is not null-terminated, so will fail */
+ cmp_ret = bpf_strncmp(str, STRNCMP_STR_SZ, no_str_target);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h
index 7de534f38c3f..a5c74d31a244 100644
--- a/tools/testing/selftests/bpf/progs/strobemeta.h
+++ b/tools/testing/selftests/bpf/progs/strobemeta.h
@@ -10,6 +10,8 @@
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
typedef uint32_t pid_t;
struct task_struct {};
@@ -24,9 +26,11 @@ struct task_struct {};
#define STACK_TABLE_EPOCH_SHIFT 20
#define STROBE_MAX_STR_LEN 1
#define STROBE_MAX_CFGS 32
+#define READ_MAP_VAR_PAYLOAD_CAP \
+ ((1 + STROBE_MAX_MAP_ENTRIES * 2) * STROBE_MAX_STR_LEN)
#define STROBE_MAX_PAYLOAD \
(STROBE_MAX_STRS * STROBE_MAX_STR_LEN + \
- STROBE_MAX_MAPS * (1 + STROBE_MAX_MAP_ENTRIES * 2) * STROBE_MAX_STR_LEN)
+ STROBE_MAX_MAPS * READ_MAP_VAR_PAYLOAD_CAP)
struct strobe_value_header {
/*
@@ -135,7 +139,7 @@ struct strobe_value_loc {
* tpidr_el0 for aarch64).
* TLS_IMM_EXEC: absolute address of GOT entry containing offset
* from thread pointer;
- * TLS_GENERAL_DYN: absolute addres of double GOT entry
+ * TLS_GENERAL_DYN: absolute address of double GOT entry
* containing tls_index_t struct;
*/
int64_t offset;
@@ -355,10 +359,10 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
size_t idx, void *tls_base,
struct strobe_value_generic *value,
struct strobemeta_payload *data,
- void *payload)
+ size_t off)
{
void *location;
- uint32_t len;
+ uint64_t len;
data->str_lens[idx] = 0;
location = calc_location(&cfg->str_locs[idx], tls_base);
@@ -366,10 +370,10 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
return 0;
bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, value->ptr);
+ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, value->ptr);
/*
* if bpf_probe_read_user_str returns error (<0), due to casting to
- * unsinged int, it will become big number, so next check is
+ * unsigned int, it will become big number, so next check is
* sufficient to check for errors AND prove to BPF verifier, that
* bpf_probe_read_user_str won't return anything bigger than
* STROBE_MAX_STR_LEN
@@ -378,31 +382,30 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
return 0;
data->str_lens[idx] = len;
- return len;
+ return off + len;
}
-static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
- size_t idx, void *tls_base,
- struct strobe_value_generic *value,
- struct strobemeta_payload *data,
- void *payload)
+static __always_inline uint64_t read_map_var(struct strobemeta_cfg *cfg,
+ size_t idx, void *tls_base,
+ struct strobe_value_generic *value,
+ struct strobemeta_payload *data,
+ size_t off)
{
struct strobe_map_descr* descr = &data->map_descrs[idx];
struct strobe_map_raw map;
void *location;
- uint32_t len;
- int i;
+ uint64_t len;
descr->tag_len = 0; /* presume no tag is set */
descr->cnt = -1; /* presume no value is set */
location = calc_location(&cfg->map_locs[idx], tls_base);
if (!location)
- return payload;
+ return off;
bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
if (bpf_probe_read_user(&map, sizeof(struct strobe_map_raw), value->ptr))
- return payload;
+ return off;
descr->id = map.id;
descr->cnt = map.cnt;
@@ -411,39 +414,91 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
data->req_meta_valid = 1;
}
- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.tag);
+ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, map.tag);
if (len <= STROBE_MAX_STR_LEN) {
descr->tag_len = len;
- payload += len;
+ off += len;
}
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < STROBE_MAX_MAP_ENTRIES; ++i) {
if (i >= map.cnt)
break;
descr->key_lens[i] = 0;
- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN,
+ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN,
map.entries[i].key);
if (len <= STROBE_MAX_STR_LEN) {
descr->key_lens[i] = len;
- payload += len;
+ off += len;
}
descr->val_lens[i] = 0;
- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN,
+ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN,
map.entries[i].val);
if (len <= STROBE_MAX_STR_LEN) {
descr->val_lens[i] = len;
- payload += len;
+ off += len;
}
}
- return payload;
+ return off;
+}
+
+#ifdef USE_BPF_LOOP
+enum read_type {
+ READ_INT_VAR,
+ READ_MAP_VAR,
+ READ_STR_VAR,
+};
+
+struct read_var_ctx {
+ struct strobemeta_payload *data;
+ void *tls_base;
+ struct strobemeta_cfg *cfg;
+ size_t payload_off;
+ /* value gets mutated */
+ struct strobe_value_generic *value;
+ enum read_type type;
+};
+
+static int read_var_callback(__u64 index, struct read_var_ctx *ctx)
+{
+	/* Lose precision info for ctx->payload_off; the verifier won't track
+	 * the double xor, so barrier_var() is needed to force clang to keep both xors.
+	 */
+ ctx->payload_off ^= index;
+ barrier_var(ctx->payload_off);
+ ctx->payload_off ^= index;
+ switch (ctx->type) {
+ case READ_INT_VAR:
+ if (index >= STROBE_MAX_INTS)
+ return 1;
+ read_int_var(ctx->cfg, index, ctx->tls_base, ctx->value, ctx->data);
+ break;
+ case READ_MAP_VAR:
+ if (index >= STROBE_MAX_MAPS)
+ return 1;
+ if (ctx->payload_off > sizeof(ctx->data->payload) - READ_MAP_VAR_PAYLOAD_CAP)
+ return 1;
+ ctx->payload_off = read_map_var(ctx->cfg, index, ctx->tls_base,
+ ctx->value, ctx->data, ctx->payload_off);
+ break;
+ case READ_STR_VAR:
+ if (index >= STROBE_MAX_STRS)
+ return 1;
+ if (ctx->payload_off > sizeof(ctx->data->payload) - STROBE_MAX_STR_LEN)
+ return 1;
+ ctx->payload_off = read_str_var(ctx->cfg, index, ctx->tls_base,
+ ctx->value, ctx->data, ctx->payload_off);
+ break;
+ }
+ return 0;
}
+#endif /* USE_BPF_LOOP */
/*
* read_strobe_meta returns NULL, if no metadata was read; otherwise returns
@@ -460,7 +515,8 @@ static void *read_strobe_meta(struct task_struct *task,
pid_t pid = bpf_get_current_pid_tgid() >> 32;
struct strobe_value_generic value = {0};
struct strobemeta_cfg *cfg;
- void *tls_base, *payload;
+ size_t payload_off;
+ void *tls_base;
cfg = bpf_map_lookup_elem(&strobemeta_cfgs, &pid);
if (!cfg)
@@ -468,42 +524,74 @@ static void *read_strobe_meta(struct task_struct *task,
data->int_vals_set_mask = 0;
data->req_meta_valid = 0;
- payload = data->payload;
+ payload_off = 0;
/*
* we don't have struct task_struct definition, it should be:
* tls_base = (void *)task->thread.fsbase;
*/
tls_base = (void *)task;
+#ifdef USE_BPF_LOOP
+ struct read_var_ctx ctx = {
+ .cfg = cfg,
+ .tls_base = tls_base,
+ .value = &value,
+ .data = data,
+ .payload_off = 0,
+ };
+ int err;
+
+ ctx.type = READ_INT_VAR;
+ err = bpf_loop(STROBE_MAX_INTS, read_var_callback, &ctx, 0);
+ if (err != STROBE_MAX_INTS)
+ return NULL;
+
+ ctx.type = READ_STR_VAR;
+ err = bpf_loop(STROBE_MAX_STRS, read_var_callback, &ctx, 0);
+ if (err != STROBE_MAX_STRS)
+ return NULL;
+
+ ctx.type = READ_MAP_VAR;
+ err = bpf_loop(STROBE_MAX_MAPS, read_var_callback, &ctx, 0);
+ if (err != STROBE_MAX_MAPS)
+ return NULL;
+
+ payload_off = ctx.payload_off;
+ /* this should not really happen, here only to satisfy verifier */
+ if (payload_off > sizeof(data->payload))
+ payload_off = sizeof(data->payload);
+#else
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
-#endif
+ __pragma_loop_unroll
+#endif /* NO_UNROLL */
for (int i = 0; i < STROBE_MAX_INTS; ++i) {
read_int_var(cfg, i, tls_base, &value, data);
}
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
-#endif
+ __pragma_loop_unroll
+#endif /* NO_UNROLL */
for (int i = 0; i < STROBE_MAX_STRS; ++i) {
- payload += read_str_var(cfg, i, tls_base, &value, data, payload);
+ payload_off = read_str_var(cfg, i, tls_base, &value, data, payload_off);
}
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
-#endif
+ __pragma_loop_unroll
+#endif /* NO_UNROLL */
for (int i = 0; i < STROBE_MAX_MAPS; ++i) {
- payload = read_map_var(cfg, i, tls_base, &value, data, payload);
+ payload_off = read_map_var(cfg, i, tls_base, &value, data, payload_off);
}
+#endif /* USE_BPF_LOOP */
+
/*
* return pointer right after end of payload, so it's possible to
* calculate exact amount of useful data that needs to be sent
*/
- return payload;
+ return &data->payload[payload_off];
}
SEC("raw_tracepoint/kfree_skb")
diff --git a/tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c b/tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c
new file mode 100644
index 000000000000..d18b992f0165
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2021 Facebook */
+
+#define STROBE_MAX_INTS 2
+#define STROBE_MAX_STRS 25
+#define STROBE_MAX_MAPS 100
+#define STROBE_MAX_MAP_ENTRIES 20
+#define USE_BPF_LOOP
+#include "strobemeta.h"
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_autocreate.c b/tools/testing/selftests/bpf/progs/struct_ops_autocreate.c
new file mode 100644
index 000000000000..ba10c3896213
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_autocreate.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int test_1_result = 0;
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1)
+{
+ test_1_result = 42;
+ return 0;
+}
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_2)
+{
+ return 0;
+}
+
+struct bpf_testmod_ops___v1 {
+ int (*test_1)(void);
+};
+
+struct bpf_testmod_ops___v2 {
+ int (*test_1)(void);
+ int (*does_not_exist)(void);
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___v1 testmod_1 = {
+ .test_1 = (void *)test_1
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___v2 testmod_2 = {
+ .test_1 = (void *)test_1,
+ .does_not_exist = (void *)test_2
+};
+
+SEC("?.struct_ops")
+struct bpf_testmod_ops___v1 optional_map = {
+ .test_1 = (void *)test_1,
+};
+
+SEC("?.struct_ops.link")
+struct bpf_testmod_ops___v1 optional_map2 = {
+ .test_1 = (void *)test_1,
+};
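
The testmod_2 map above deliberately references does_not_exist, a member the kernel-side bpf_testmod_ops does not have, so it can only load if userspace opts it out; the optional "?" maps exercise the same knob in the other direction. A hedged sketch using libbpf's per-map autocreate setter (skeleton names are assumptions derived from the file name):

/* Sketch: disable creation of the unsupported struct_ops map before load;
 * illustrative only.
 */
#include <stdbool.h>
#include <bpf/libbpf.h>
#include "struct_ops_autocreate.skel.h"

static struct struct_ops_autocreate *load_without_testmod_2(void)
{
	struct struct_ops_autocreate *skel;

	skel = struct_ops_autocreate__open();
	if (!skel)
		return NULL;

	/* the kernel cannot create a map referencing does_not_exist */
	bpf_map__set_autocreate(skel->maps.testmod_2, false);

	if (struct_ops_autocreate__load(skel)) {
		struct_ops_autocreate__destroy(skel);
		return NULL;
	}
	return skel;
}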
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c b/tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c
new file mode 100644
index 000000000000..6049d9c902d3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int test_1_result = 0;
+
+SEC("?struct_ops/test_1")
+int BPF_PROG(foo)
+{
+ test_1_result = 42;
+ return 0;
+}
+
+SEC("?struct_ops/test_1")
+int BPF_PROG(bar)
+{
+ test_1_result = 24;
+ return 0;
+}
+
+struct bpf_testmod_ops {
+ int (*test_1)(void);
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+ .test_1 = (void *)bar
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_detach.c b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
new file mode 100644
index 000000000000..284a5b008e0c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+/*
+ * This subprogram validates that libbpf handles the situation in which a BPF
+ * object has subprograms in the .text section but no entry BPF programs.
+ * At some point that was causing issues due to legacy logic that treated such
+ * a subprogram as an entry program (with unknown program type, which would fail).
+ */
+int dangling_subprog(void)
+{
+ /* do nothing, just be here */
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_do_detach;
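
The otherwise empty testmod_do_detach map exists so userspace can attach it and then sever the resulting link in place; a hedged sketch of that flow (skeleton names are assumptions derived from the file name):

/* Sketch: attach the struct_ops map, then detach the link; illustrative only. */
#include <bpf/libbpf.h>
#include "struct_ops_detach.skel.h"

static int attach_then_detach(void)
{
	struct struct_ops_detach *skel;
	struct bpf_link *link = NULL;
	int err = -1;

	skel = struct_ops_detach__open_and_load();
	if (!skel)
		return -1;

	link = bpf_map__attach_struct_ops(skel->maps.testmod_do_detach);
	if (!link)
		goto out;

	/* detach while keeping the link object alive, then clean up */
	err = bpf_link__detach(link);
out:
	bpf_link__destroy(link);
	struct_ops_detach__destroy(skel);
	return err;
}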
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c b/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c
new file mode 100644
index 000000000000..d8cc99f5c2e2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1_forgotten)
+{
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops ops = {
+ /* we forgot to reference test_1_forgotten above, oops */
+};
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c
new file mode 100644
index 000000000000..36386b3c23a1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c
@@ -0,0 +1,30 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
+ * allow a referenced kptr or a NULL pointer to be returned. A referenced kptr to the task
+ * here is acquired automatically as the task argument is tagged with "__ref".
+ */
+SEC("struct_ops/test_return_ref_kptr")
+struct task_struct *BPF_PROG(kptr_return, int dummy,
+ struct task_struct *task, struct cgroup *cgrp)
+{
+ if (dummy % 2) {
+ bpf_task_release(task);
+ return NULL;
+ }
+ return task;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_kptr_return = {
+ .test_return_ref_kptr = (void *)kptr_return,
+};
+
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__invalid_scalar.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__invalid_scalar.c
new file mode 100644
index 000000000000..caeea158ef69
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__invalid_scalar.c
@@ -0,0 +1,26 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
+ * reject programs returning a non-zero scalar value.
+ */
+SEC("struct_ops/test_return_ref_kptr")
+__failure __msg("At program exit the register R0 has smin=1 smax=1 should have been in [0, 0]")
+struct task_struct *BPF_PROG(kptr_return_fail__invalid_scalar, int dummy,
+ struct task_struct *task, struct cgroup *cgrp)
+{
+ bpf_task_release(task);
+ return (struct task_struct *)1;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_kptr_return = {
+ .test_return_ref_kptr = (void *)kptr_return_fail__invalid_scalar,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__local_kptr.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__local_kptr.c
new file mode 100644
index 000000000000..b8b4f05c3d7f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__local_kptr.c
@@ -0,0 +1,34 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
+ * reject programs returning a local kptr.
+ */
+SEC("struct_ops/test_return_ref_kptr")
+__failure __msg("At program exit the register R0 is not a known value (ptr_or_null_)")
+struct task_struct *BPF_PROG(kptr_return_fail__local_kptr, int dummy,
+ struct task_struct *task, struct cgroup *cgrp)
+{
+ struct task_struct *t;
+
+ bpf_task_release(task);
+
+ t = bpf_obj_new(typeof(*task));
+ if (!t)
+ return NULL;
+
+ return t;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_kptr_return = {
+ .test_return_ref_kptr = (void *)kptr_return_fail__local_kptr,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__nonzero_offset.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__nonzero_offset.c
new file mode 100644
index 000000000000..7ddeb28c2329
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__nonzero_offset.c
@@ -0,0 +1,25 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
+ * reject programs returning a modified referenced kptr.
+ */
+SEC("struct_ops/test_return_ref_kptr")
+__failure __msg("dereference of modified trusted_ptr_ ptr R0 off={{[0-9]+}} disallowed")
+struct task_struct *BPF_PROG(kptr_return_fail__nonzero_offset, int dummy,
+ struct task_struct *task, struct cgroup *cgrp)
+{
+ return (struct task_struct *)&task->jobctl;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_kptr_return = {
+ .test_return_ref_kptr = (void *)kptr_return_fail__nonzero_offset,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__wrong_type.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__wrong_type.c
new file mode 100644
index 000000000000..6a2dd5367802
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__wrong_type.c
@@ -0,0 +1,30 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
+ * reject programs returning a referenced kptr of the wrong type.
+ */
+SEC("struct_ops/test_return_ref_kptr")
+__failure __msg("At program exit the register R0 is not a known value (ptr_or_null_)")
+struct task_struct *BPF_PROG(kptr_return_fail__wrong_type, int dummy,
+ struct task_struct *task, struct cgroup *cgrp)
+{
+ struct task_struct *ret;
+
+ ret = (struct task_struct *)bpf_cgroup_acquire(cgrp);
+ bpf_task_release(task);
+
+ return ret;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_kptr_return = {
+ .test_return_ref_kptr = (void *)kptr_return_fail__wrong_type,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
new file mode 100644
index 000000000000..ccab3935aa42
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t tgid = 0;
+
+/* This is a test BPF program that uses struct_ops to access an argument
+ * that may be NULL. This is a test for the verifier to ensure that it can
+ * strip PTR_MAYBE_NULL correctly.
+ */
+SEC("struct_ops/test_maybe_null")
+int BPF_PROG(test_maybe_null, int dummy,
+ struct task_struct *task)
+{
+ if (task)
+ tgid = task->tgid;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+ .test_maybe_null = (void *)test_maybe_null,
+};
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
new file mode 100644
index 000000000000..8b5515f4f724
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t tgid = 0;
+
+SEC("struct_ops/test_maybe_null_struct_ptr")
+int BPF_PROG(test_maybe_null_struct_ptr, int dummy,
+ struct task_struct *task)
+{
+ tgid = task->tgid;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_struct_ptr = {
+ .test_maybe_null = (void *)test_maybe_null_struct_ptr,
+};
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_module.c b/tools/testing/selftests/bpf/progs/struct_ops_module.c
new file mode 100644
index 000000000000..71c420c3a5a6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_module.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+int test_1_result = 0;
+int test_2_result = 0;
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1)
+{
+ test_1_result = 0xdeadbeef;
+ return 0;
+}
+
+SEC("struct_ops/test_2")
+void BPF_PROG(test_2, int a, int b)
+{
+ test_2_result = a + b;
+}
+
+SEC("?struct_ops/test_3")
+int BPF_PROG(test_3, int a, int b)
+{
+ test_2_result = a + b + 3;
+ return a + b + 3;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2,
+ .data = 0x1,
+};
+
+SEC("struct_ops/test_2")
+void BPF_PROG(test_2_v2, int a, int b)
+{
+ test_2_result = a * b;
+}
+
+struct bpf_testmod_ops___v2 {
+ int (*test_1)(void);
+ void (*test_2)(int a, int b);
+ int (*test_maybe_null)(int dummy, struct task_struct *task);
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___v2 testmod_2 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2_v2,
+};
+
+struct bpf_testmod_ops___zeroed {
+ int (*test_1)(void);
+ void (*test_2)(int a, int b);
+ int (*test_maybe_null)(int dummy, struct task_struct *task);
+ void (*zeroed_op)(int a, int b);
+ int zeroed;
+};
+
+SEC("struct_ops/test_3")
+int BPF_PROG(zeroed_op)
+{
+ return 1;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___zeroed testmod_zeroed = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2_v2,
+ .zeroed_op = (void *)zeroed_op,
+};
+
+struct bpf_testmod_ops___incompatible {
+ int (*test_1)(void);
+ void (*test_2)(int *a);
+ int data;
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___incompatible testmod_incompatible = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2,
+ .data = 3,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c
new file mode 100644
index 000000000000..5b23ea817f1f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define TRAMP(x) \
+ SEC("struct_ops/tramp_" #x) \
+ int BPF_PROG(tramp_ ## x, int a) \
+ { \
+ return a; \
+ }
+
+TRAMP(1)
+TRAMP(2)
+TRAMP(3)
+TRAMP(4)
+TRAMP(5)
+TRAMP(6)
+TRAMP(7)
+TRAMP(8)
+TRAMP(9)
+TRAMP(10)
+TRAMP(11)
+TRAMP(12)
+TRAMP(13)
+TRAMP(14)
+TRAMP(15)
+TRAMP(16)
+TRAMP(17)
+TRAMP(18)
+TRAMP(19)
+TRAMP(20)
+TRAMP(21)
+TRAMP(22)
+TRAMP(23)
+TRAMP(24)
+TRAMP(25)
+TRAMP(26)
+TRAMP(27)
+TRAMP(28)
+TRAMP(29)
+TRAMP(30)
+TRAMP(31)
+TRAMP(32)
+TRAMP(33)
+TRAMP(34)
+TRAMP(35)
+TRAMP(36)
+TRAMP(37)
+TRAMP(38)
+TRAMP(39)
+TRAMP(40)
+
+#define F_TRAMP(x) .tramp_ ## x = (void *)tramp_ ## x
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops multi_pages = {
+ F_TRAMP(1),
+ F_TRAMP(2),
+ F_TRAMP(3),
+ F_TRAMP(4),
+ F_TRAMP(5),
+ F_TRAMP(6),
+ F_TRAMP(7),
+ F_TRAMP(8),
+ F_TRAMP(9),
+ F_TRAMP(10),
+ F_TRAMP(11),
+ F_TRAMP(12),
+ F_TRAMP(13),
+ F_TRAMP(14),
+ F_TRAMP(15),
+ F_TRAMP(16),
+ F_TRAMP(17),
+ F_TRAMP(18),
+ F_TRAMP(19),
+ F_TRAMP(20),
+ F_TRAMP(21),
+ F_TRAMP(22),
+ F_TRAMP(23),
+ F_TRAMP(24),
+ F_TRAMP(25),
+ F_TRAMP(26),
+ F_TRAMP(27),
+ F_TRAMP(28),
+ F_TRAMP(29),
+ F_TRAMP(30),
+ F_TRAMP(31),
+ F_TRAMP(32),
+ F_TRAMP(33),
+ F_TRAMP(34),
+ F_TRAMP(35),
+ F_TRAMP(36),
+ F_TRAMP(37),
+ F_TRAMP(38),
+ F_TRAMP(39),
+ F_TRAMP(40),
+};
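
The forty TRAMP() instances are presumably what push this struct_ops' trampoline image past a single page. For readability, this is what one pair of macro invocations above expands to (mechanical expansion of the macros in this file, nothing new):

/* Expansion of TRAMP(1): */
SEC("struct_ops/tramp_1")
int BPF_PROG(tramp_1, int a)
{
	return a;
}

/* Expansion of F_TRAMP(1) inside the multi_pages initializer
 * (the trailing comma comes from the initializer list):
 */
	.tramp_1 = (void *)tramp_1,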
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c b/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c
new file mode 100644
index 000000000000..5d0937fa07be
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+int rand;
+int arr[1];
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1_turn_off)
+{
+ return arr[rand]; /* potentially way out of range access */
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops ops = {
+ .test_1 = (void *)test_1_turn_off,
+};
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c
new file mode 100644
index 000000000000..dbe646013811
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
+bool skip __attribute((__section__(".data"))) = false;
+#else
+bool skip = true;
+#endif
+
+void bpf_testmod_ops3_call_test_2(void) __ksym;
+
+int val_i, val_j;
+
+__noinline static int subprog2(int *a, int *b)
+{
+ return val_i + a[10] + b[20];
+}
+
+__noinline static int subprog1(int *a)
+{
+ /* stack size 200 bytes */
+ int b[50] = {};
+
+ b[20] = 2;
+ return subprog2(a, b);
+}
+
+
+SEC("struct_ops")
+int BPF_PROG(test_1)
+{
+ /* stack size 400 bytes */
+ int a[100] = {};
+
+ a[10] = 1;
+ val_i = subprog1(a);
+ bpf_testmod_ops3_call_test_2();
+ return 0;
+}
+
+SEC("struct_ops")
+int BPF_PROG(test_2)
+{
+ /* stack size 200 bytes */
+ int a[50] = {};
+
+ a[10] = 3;
+ val_j = subprog1(a);
+ return 0;
+}
+
+SEC(".struct_ops")
+struct bpf_testmod_ops3 testmod_1 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c
new file mode 100644
index 000000000000..3d89ad7cbe2a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
+bool skip __attribute((__section__(".data"))) = false;
+#else
+bool skip = true;
+#endif
+
+void bpf_testmod_ops3_call_test_2(void) __ksym;
+
+int val_i, val_j;
+
+__noinline static int subprog2(int *a, int *b)
+{
+ return val_i + a[10] + b[20];
+}
+
+__noinline static int subprog1(int *a)
+{
+ /* stack size 200 bytes */
+ int b[50] = {};
+
+ b[20] = 2;
+ return subprog2(a, b);
+}
+
+
+SEC("struct_ops")
+int BPF_PROG(test_1)
+{
+ /* stack size 100 bytes */
+ int a[25] = {};
+
+ a[10] = 1;
+ val_i = subprog1(a);
+ bpf_testmod_ops3_call_test_2();
+ return 0;
+}
+
+SEC("struct_ops")
+int BPF_PROG(test_2)
+{
+ /* stack size 400 bytes */
+ int a[100] = {};
+
+ a[10] = 3;
+ val_j = subprog1(a);
+ return 0;
+}
+
+SEC(".struct_ops")
+struct bpf_testmod_ops3 testmod_1 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
new file mode 100644
index 000000000000..b1f6d7e5a8e5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
+bool skip __attribute((__section__(".data"))) = false;
+#else
+bool skip = true;
+#endif
+
+void bpf_testmod_ops3_call_test_1(void) __ksym;
+
+int val_i, val_j;
+
+__noinline static int subprog2(int *a, int *b)
+{
+ return val_i + a[1] + b[20];
+}
+
+__noinline static int subprog1(int *a)
+{
+ /* stack size 400 bytes */
+ int b[100] = {};
+
+ b[20] = 2;
+ return subprog2(a, b);
+}
+
+
+SEC("struct_ops")
+int BPF_PROG(test_1)
+{
+ /* stack size 20 bytes */
+ int a[5] = {};
+
+ a[1] = 1;
+ val_j += subprog1(a);
+ bpf_testmod_ops3_call_test_1();
+ return 0;
+}
+
+SEC(".struct_ops")
+struct bpf_testmod_ops3 testmod_1 = {
+ .test_1 = (void *)test_1,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c b/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c
new file mode 100644
index 000000000000..76dcb6089d7f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c
@@ -0,0 +1,31 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__attribute__((nomerge)) extern void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This is a test BPF program that uses struct_ops to access a referenced
+ * kptr argument. This is a test for the verifier to ensure that it
+ * 1) recognizes the task as a referenced object (i.e., ref_obj_id > 0), and
+ * 2) the same reference can be acquired from multiple paths as long as it
+ * has not been released.
+ */
+SEC("struct_ops/test_refcounted")
+int BPF_PROG(refcounted, int dummy, struct task_struct *task)
+{
+ if (dummy == 1)
+ bpf_task_release(task);
+ else
+ bpf_task_release(task);
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_refcounted = {
+ .test_refcounted = (void *)refcounted,
+};
+
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__global_subprog.c b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__global_subprog.c
new file mode 100644
index 000000000000..ae074aa62852
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__global_subprog.c
@@ -0,0 +1,39 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+extern void bpf_task_release(struct task_struct *p) __ksym;
+
+__noinline int subprog_release(__u64 *ctx __arg_ctx)
+{
+ struct task_struct *task = (struct task_struct *)ctx[1];
+ int dummy = (int)ctx[0];
+
+ bpf_task_release(task);
+
+ return dummy + 1;
+}
+
+/* Test that the verifier rejects a program that contains a global
+ * subprogram with referenced kptr arguments
+ */
+SEC("struct_ops/test_refcounted")
+__failure __log_level(2)
+__msg("Validating subprog_release() func#1...")
+__msg("invalid bpf_context access off=8. Reference may already be released")
+int refcounted_fail__global_subprog(unsigned long long *ctx)
+{
+ struct task_struct *task = (struct task_struct *)ctx[1];
+
+ bpf_task_release(task);
+
+ return subprog_release(ctx);
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_ref_acquire = {
+ .test_refcounted = (void *)refcounted_fail__global_subprog,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__ref_leak.c b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__ref_leak.c
new file mode 100644
index 000000000000..e945b1a04294
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__ref_leak.c
@@ -0,0 +1,22 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Test that the verifier rejects a program that acquires a referenced
+ * kptr through context without releasing the reference
+ */
+SEC("struct_ops/test_refcounted")
+__failure __msg("Unreleased reference id=1 alloc_insn=0")
+int BPF_PROG(refcounted_fail__ref_leak, int dummy,
+ struct task_struct *task)
+{
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_ref_acquire = {
+ .test_refcounted = (void *)refcounted_fail__ref_leak,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__tail_call.c b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__tail_call.c
new file mode 100644
index 000000000000..3b125025a1f2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__tail_call.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} prog_array SEC(".maps");
+
+/* Test that the verifier rejects a program with referenced kptr arguments
+ * that makes a tail call.
+ */
+SEC("struct_ops/test_refcounted")
+__failure __msg("program with __ref argument cannot tail call")
+int refcounted_fail__tail_call(unsigned long long *ctx)
+{
+ struct task_struct *task = (struct task_struct *)ctx[1];
+
+ bpf_task_release(task);
+ bpf_tail_call(ctx, &prog_array, 0);
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_ref_acquire = {
+ .test_refcounted = (void *)refcounted_fail__tail_call,
+};
+
diff --git a/tools/testing/selftests/bpf/progs/summarization.c b/tools/testing/selftests/bpf/progs/summarization.c
new file mode 100644
index 000000000000..f89effe82c9e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/summarization.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
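+/* Subprograms and entry points with different "changes packet data" and
+ * "might sleep" properties; the freplace programs in summarization_freplace.c
+ * are meant to be attached to them so that the verifier's subprogram
+ * summaries can be exercised.
+ */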
+__noinline
+long changes_pkt_data(struct __sk_buff *sk)
+{
+ return bpf_skb_pull_data(sk, 0);
+}
+
+__noinline __weak
+long does_not_change_pkt_data(struct __sk_buff *sk)
+{
+ return 0;
+}
+
+SEC("?tc")
+int main_changes_with_subprogs(struct __sk_buff *sk)
+{
+ changes_pkt_data(sk);
+ does_not_change_pkt_data(sk);
+ return 0;
+}
+
+SEC("?tc")
+int main_changes(struct __sk_buff *sk)
+{
+ bpf_skb_pull_data(sk, 0);
+ return 0;
+}
+
+SEC("?tc")
+int main_does_not_change(struct __sk_buff *sk)
+{
+ return 0;
+}
+
+__noinline
+long might_sleep(struct pt_regs *ctx __arg_ctx)
+{
+ int i;
+
+ bpf_copy_from_user(&i, sizeof(i), NULL);
+ return i;
+}
+
+__noinline __weak
+long does_not_sleep(struct pt_regs *ctx __arg_ctx)
+{
+ return 0;
+}
+
+SEC("?uprobe.s")
+int main_might_sleep_with_subprogs(struct pt_regs *ctx)
+{
+ might_sleep(ctx);
+ does_not_sleep(ctx);
+ return 0;
+}
+
+SEC("?uprobe.s")
+int main_might_sleep(struct pt_regs *ctx)
+{
+ int i;
+
+ bpf_copy_from_user(&i, sizeof(i), NULL);
+ return i;
+}
+
+SEC("?uprobe.s")
+int main_does_not_sleep(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/summarization_freplace.c b/tools/testing/selftests/bpf/progs/summarization_freplace.c
new file mode 100644
index 000000000000..935f00e0e9ea
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/summarization_freplace.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("?freplace")
+long changes_pkt_data(struct __sk_buff *sk)
+{
+ return bpf_skb_pull_data(sk, 0);
+}
+
+SEC("?freplace")
+long does_not_change_pkt_data(struct __sk_buff *sk)
+{
+ return 0;
+}
+
+SEC("?freplace")
+long might_sleep(struct pt_regs *ctx)
+{
+ int i;
+
+ bpf_copy_from_user(&i, sizeof(i), NULL);
+ return i;
+}
+
+SEC("?freplace")
+long does_not_sleep(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/syscall.c b/tools/testing/selftests/bpf/progs/syscall.c
new file mode 100644
index 000000000000..b698cc62a371
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/syscall.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <../../../tools/include/linux/filter.h>
+#include <linux/btf.h>
+#include <string.h>
+#include <errno.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct bpf_map {
+ int id;
+} __attribute__((preserve_access_index));
+
+struct args {
+ __u64 log_buf;
+ __u32 log_size;
+ int max_entries;
+ int map_fd;
+ int prog_fd;
+ int btf_fd;
+};
+
+#define BTF_INFO_ENC(kind, kind_flag, vlen) \
+ ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
+#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
+#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
+ ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
+#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
+ BTF_INT_ENC(encoding, bits_offset, bits)
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, union bpf_attr);
+ __uint(max_entries, 1);
+} bpf_attr_array SEC(".maps");
+
+struct inner_map_type {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, 4);
+ __uint(value_size, 4);
+ __uint(max_entries, 1);
+} inner_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+ __array(values, struct inner_map_type);
+} outer_array_map SEC(".maps") = {
+ .values = {
+ [0] = &inner_map,
+ },
+};
+
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64) (unsigned long) ptr;
+}
+
+static int btf_load(void)
+{
+ struct btf_blob {
+ struct btf_header btf_hdr;
+ __u32 types[8];
+ __u32 str;
+ } raw_btf = {
+ .btf_hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_len = sizeof(raw_btf.types),
+ .str_off = offsetof(struct btf_blob, str) - offsetof(struct btf_blob, types),
+ .str_len = sizeof(raw_btf.str),
+ },
+ .types = {
+ /* long */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [1] */
+ /* unsigned long */
+ BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
+ },
+ };
+ static union bpf_attr btf_load_attr = {
+ .btf_size = sizeof(raw_btf),
+ };
+
+ btf_load_attr.btf = (long)&raw_btf;
+ return bpf_sys_bpf(BPF_BTF_LOAD, &btf_load_attr, sizeof(btf_load_attr));
+}
+
+SEC("syscall")
+int load_prog(struct args *ctx)
+{
+ static char license[] = "GPL";
+ static struct bpf_insn insns[] = {
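+		/* Minimal XDP program: store key 0 on the stack, look it up in
+		 * the map (whose fd is patched in below), and return 0.
+		 */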
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ static union bpf_attr map_create_attr = {
+ .map_type = BPF_MAP_TYPE_HASH,
+ .key_size = 8,
+ .value_size = 8,
+ .btf_key_type_id = 1,
+ .btf_value_type_id = 2,
+ };
+ static union bpf_attr map_update_attr = { .map_fd = 1, };
+ static __u64 key = 12;
+ static __u64 value = 34;
+ static union bpf_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .insn_cnt = ARRAY_SIZE(insns),
+ };
+ int ret;
+
+ ret = btf_load();
+ if (ret <= 0)
+ return ret;
+
+ ctx->btf_fd = ret;
+ map_create_attr.max_entries = ctx->max_entries;
+ map_create_attr.btf_fd = ret;
+
+ prog_load_attr.license = ptr_to_u64(license);
+ prog_load_attr.insns = ptr_to_u64(insns);
+ prog_load_attr.log_buf = ctx->log_buf;
+ prog_load_attr.log_size = ctx->log_size;
+ prog_load_attr.log_level = 1;
+
+ ret = bpf_sys_bpf(BPF_MAP_CREATE, &map_create_attr, sizeof(map_create_attr));
+ if (ret <= 0)
+ return ret;
+ ctx->map_fd = ret;
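+	/* Patch the BPF_LD_MAP_FD pseudo instruction (insns[3]) with the fd
+	 * of the map that was just created.
+	 */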
+ insns[3].imm = ret;
+
+ map_update_attr.map_fd = ret;
+ map_update_attr.key = ptr_to_u64(&key);
+ map_update_attr.value = ptr_to_u64(&value);
+ ret = bpf_sys_bpf(BPF_MAP_UPDATE_ELEM, &map_update_attr, sizeof(map_update_attr));
+ if (ret < 0)
+ return ret;
+
+ ret = bpf_sys_bpf(BPF_PROG_LOAD, &prog_load_attr, sizeof(prog_load_attr));
+ if (ret <= 0)
+ return ret;
+ ctx->prog_fd = ret;
+ return 1;
+}
+
+SEC("syscall")
+int update_outer_map(void *ctx)
+{
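+	/* From this syscall program, use bpf_sys_bpf() to look up the outer
+	 * map by id, create a new inner map, insert it into the outer
+	 * array-of-maps, and then delete the entry again.
+	 */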
+ int zero = 0, ret = 0, outer_fd = -1, inner_fd = -1, err;
+ const int attr_sz = sizeof(union bpf_attr);
+ union bpf_attr *attr;
+
+ attr = bpf_map_lookup_elem((struct bpf_map *)&bpf_attr_array, &zero);
+ if (!attr)
+ goto out;
+
+ memset(attr, 0, attr_sz);
+ attr->map_id = ((struct bpf_map *)&outer_array_map)->id;
+ outer_fd = bpf_sys_bpf(BPF_MAP_GET_FD_BY_ID, attr, attr_sz);
+ if (outer_fd < 0)
+ goto out;
+
+ memset(attr, 0, attr_sz);
+ attr->map_type = BPF_MAP_TYPE_ARRAY;
+ attr->key_size = 4;
+ attr->value_size = 4;
+ attr->max_entries = 1;
+ inner_fd = bpf_sys_bpf(BPF_MAP_CREATE, attr, attr_sz);
+ if (inner_fd < 0)
+ goto out;
+
+ memset(attr, 0, attr_sz);
+ attr->map_fd = outer_fd;
+ attr->key = ptr_to_u64(&zero);
+ attr->value = ptr_to_u64(&inner_fd);
+ err = bpf_sys_bpf(BPF_MAP_UPDATE_ELEM, attr, attr_sz);
+ if (err)
+ goto out;
+
+ memset(attr, 0, attr_sz);
+ attr->map_fd = outer_fd;
+ attr->key = ptr_to_u64(&zero);
+ err = bpf_sys_bpf(BPF_MAP_DELETE_ELEM, attr, attr_sz);
+ if (err)
+ goto out;
+ ret = 1;
+out:
+ if (inner_fd >= 0)
+ bpf_sys_close(inner_fd);
+ if (outer_fd >= 0)
+ bpf_sys_close(outer_fd);
+ return ret;
+}
diff --git a/tools/testing/selftests/bpf/progs/tailcall1.c b/tools/testing/selftests/bpf/progs/tailcall1.c
index 7115bcefbe8a..8159a0b4a69a 100644
--- a/tools/testing/selftests/bpf/progs/tailcall1.c
+++ b/tools/testing/selftests/bpf/progs/tailcall1.c
@@ -11,8 +11,8 @@ struct {
} jmp_table SEC(".maps");
#define TAIL_FUNC(x) \
- SEC("classifier/" #x) \
- int bpf_func_##x(struct __sk_buff *skb) \
+ SEC("tc") \
+ int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
@@ -20,7 +20,7 @@ TAIL_FUNC(0)
TAIL_FUNC(1)
TAIL_FUNC(2)
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
/* Multiple locations to make sure we patch
@@ -45,4 +45,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall2.c b/tools/testing/selftests/bpf/progs/tailcall2.c
index 0431e4fe7efd..a5ff53e61702 100644
--- a/tools/testing/selftests/bpf/progs/tailcall2.c
+++ b/tools/testing/selftests/bpf/progs/tailcall2.c
@@ -10,41 +10,41 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 1);
return 0;
}
-SEC("classifier/1")
-int bpf_func_1(struct __sk_buff *skb)
+SEC("tc")
+int classifier_1(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 2);
return 1;
}
-SEC("classifier/2")
-int bpf_func_2(struct __sk_buff *skb)
+SEC("tc")
+int classifier_2(struct __sk_buff *skb)
{
return 2;
}
-SEC("classifier/3")
-int bpf_func_3(struct __sk_buff *skb)
+SEC("tc")
+int classifier_3(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 4);
return 3;
}
-SEC("classifier/4")
-int bpf_func_4(struct __sk_buff *skb)
+SEC("tc")
+int classifier_4(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 3);
return 4;
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 0);
@@ -56,4 +56,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall3.c b/tools/testing/selftests/bpf/progs/tailcall3.c
index 739dc2a51e74..f60bcd7b8d4b 100644
--- a/tools/testing/selftests/bpf/progs/tailcall3.c
+++ b/tools/testing/selftests/bpf/progs/tailcall3.c
@@ -10,17 +10,17 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int count;
+int count = 0;
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
count++;
bpf_tail_call_static(skb, &jmp_table, 0);
return 1;
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 0);
@@ -28,4 +28,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall4.c b/tools/testing/selftests/bpf/progs/tailcall4.c
index f82075b47d7d..a56bbc2313ca 100644
--- a/tools/testing/selftests/bpf/progs/tailcall4.c
+++ b/tools/testing/selftests/bpf/progs/tailcall4.c
@@ -10,11 +10,11 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int selector;
+int selector = 0;
#define TAIL_FUNC(x) \
- SEC("classifier/" #x) \
- int bpf_func_##x(struct __sk_buff *skb) \
+ SEC("tc") \
+ int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
@@ -22,7 +22,7 @@ TAIL_FUNC(0)
TAIL_FUNC(1)
TAIL_FUNC(2)
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call(skb, &jmp_table, selector);
@@ -30,4 +30,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall5.c b/tools/testing/selftests/bpf/progs/tailcall5.c
index ce5450744fd4..8d03496eb6ca 100644
--- a/tools/testing/selftests/bpf/progs/tailcall5.c
+++ b/tools/testing/selftests/bpf/progs/tailcall5.c
@@ -10,11 +10,11 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int selector;
+int selector = 0;
#define TAIL_FUNC(x) \
- SEC("classifier/" #x) \
- int bpf_func_##x(struct __sk_buff *skb) \
+ SEC("tc") \
+ int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
@@ -22,7 +22,7 @@ TAIL_FUNC(0)
TAIL_FUNC(1)
TAIL_FUNC(2)
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
int idx = 0;
@@ -37,4 +37,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall6.c b/tools/testing/selftests/bpf/progs/tailcall6.c
new file mode 100644
index 000000000000..d77b8abd62f3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall6.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int count, which;
+
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
+{
+ count++;
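+	/* Guard that the compiler has not constant-folded 'which'; if it had,
+	 * the call to __bpf_unreachable() would survive and break the build.
+	 */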
+ if (__builtin_constant_p(which))
+ __bpf_unreachable();
+ bpf_tail_call(skb, &jmp_table, which);
+ return 1;
+}
+
+SEC("tc")
+int entry(struct __sk_buff *skb)
+{
+ if (__builtin_constant_p(which))
+ __bpf_unreachable();
+ bpf_tail_call(skb, &jmp_table, which);
+ return 0;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
index 0103f3dd9f02..8c91428deb90 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
@@ -10,8 +10,8 @@ struct {
} jmp_table SEC(".maps");
#define TAIL_FUNC(x) \
- SEC("classifier/" #x) \
- int bpf_func_##x(struct __sk_buff *skb) \
+ SEC("tc") \
+ int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
@@ -26,7 +26,7 @@ int subprog_tail(struct __sk_buff *skb)
return skb->len * 2;
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 1);
@@ -35,4 +35,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
index 7b1c04183824..ce97d141daee 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
@@ -20,16 +20,16 @@ int subprog_tail(struct __sk_buff *skb)
return 1;
}
-static volatile int count;
+int count = 0;
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
count++;
return subprog_tail(skb);
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 0);
@@ -38,4 +38,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
index 0d5482bea6c9..99c8d1d8a187 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
@@ -2,6 +2,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"
+#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
@@ -20,6 +21,8 @@ int subprog_tail2(struct __sk_buff *skb)
else
bpf_tail_call_static(skb, &jmp_table, 1);
+ __sink(arr[sizeof(arr) - 1]);
+
return skb->len;
}
@@ -30,32 +33,39 @@ int subprog_tail(struct __sk_buff *skb)
bpf_tail_call_static(skb, &jmp_table, 0);
+ __sink(arr[sizeof(arr) - 1]);
+
return skb->len * 2;
}
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
volatile char arr[128] = {};
+ __sink(arr[sizeof(arr) - 1]);
+
return subprog_tail2(skb);
}
-SEC("classifier/1")
-int bpf_func_1(struct __sk_buff *skb)
+SEC("tc")
+int classifier_1(struct __sk_buff *skb)
{
volatile char arr[128] = {};
+ __sink(arr[sizeof(arr) - 1]);
+
return skb->len * 3;
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
volatile char arr[128] = {};
+ __sink(arr[sizeof(arr) - 1]);
+
return subprog_tail(skb);
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
index 9a1b166b7fbe..a017d6b2f1dd 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
@@ -3,17 +3,35 @@
#include <bpf/bpf_helpers.h>
struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} nop_table SEC(".maps");
+
+struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 3);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int count;
+int count = 0;
+int noise = 0;
+
+static __always_inline int subprog_noise(void)
+{
+ __u32 key = 0;
+
+ bpf_map_lookup_elem(&nop_table, &key);
+ return 0;
+}
__noinline
int subprog_tail_2(struct __sk_buff *skb)
{
+ if (noise)
+ subprog_noise();
bpf_tail_call_static(skb, &jmp_table, 2);
return skb->len * 3;
}
@@ -32,30 +50,29 @@ int subprog_tail(struct __sk_buff *skb)
return skb->len;
}
-SEC("classifier/1")
-int bpf_func_1(struct __sk_buff *skb)
+SEC("tc")
+int classifier_1(struct __sk_buff *skb)
{
return subprog_tail_2(skb);
}
-SEC("classifier/2")
-int bpf_func_2(struct __sk_buff *skb)
+SEC("tc")
+int classifier_2(struct __sk_buff *skb)
{
count++;
return subprog_tail_2(skb);
}
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
return subprog_tail_1(skb);
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
return subprog_tail(skb);
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c
new file mode 100644
index 000000000000..4a9f63bea66c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define __unused __attribute__((unused))
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int done = 0;
+
+SEC("tc")
+int classifier_0(struct __sk_buff *skb __unused)
+{
+ done = 1;
+ return 0;
+}
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+ /* Don't propagate the constant to the caller */
+ volatile int ret = 1;
+
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ return ret;
+}
+
+SEC("tc")
+int entry(struct __sk_buff *skb)
+{
+	/* Have data on the stack whose size is not a multiple of 8 */
+ volatile char arr[1] = {};
+
+ __sink(arr[0]);
+
+ return subprog_tail(skb);
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c
new file mode 100644
index 000000000000..8436c6729167
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Leon Hwang */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int count = 0;
+
+SEC("fentry/subprog_tail")
+int BPF_PROG(fentry, struct sk_buff *skb)
+{
+ count++;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c
new file mode 100644
index 000000000000..fe16412c6e6e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Leon Hwang */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int count = 0;
+
+SEC("fexit/subprog_tail")
+int BPF_PROG(fexit, struct sk_buff *skb)
+{
+ count++;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c
new file mode 100644
index 000000000000..327ca395e860
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_legacy.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int count = 0;
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ return 0;
+}
+
+SEC("tc")
+int entry(struct __sk_buff *skb)
+{
+ int ret = 1;
+
+ count++;
+ subprog_tail(skb);
+ subprog_tail(skb);
+
+ return ret;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c
new file mode 100644
index 000000000000..72fd0d577506
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+int classifier_0(struct __sk_buff *skb);
+int classifier_1(struct __sk_buff *skb);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 2);
+ __uint(key_size, sizeof(__u32));
+ __array(values, void (void));
+} jmp_table SEC(".maps") = {
+ .values = {
+ [0] = (void *) &classifier_0,
+ [1] = (void *) &classifier_1,
+ },
+};
+
+int count0 = 0;
+int count1 = 0;
+
+static __noinline
+int subprog_tail0(struct __sk_buff *skb)
+{
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ return 0;
+}
+
+__auxiliary
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
+{
+ count0++;
+ subprog_tail0(skb);
+ return 0;
+}
+
+static __noinline
+int subprog_tail1(struct __sk_buff *skb)
+{
+ bpf_tail_call_static(skb, &jmp_table, 1);
+ return 0;
+}
+
+__auxiliary
+SEC("tc")
+int classifier_1(struct __sk_buff *skb)
+{
+ count1++;
+ subprog_tail1(skb);
+ return 0;
+}
+
+__success
+__retval(33)
+SEC("tc")
+int tailcall_bpf2bpf_hierarchy_2(struct __sk_buff *skb)
+{
+ int ret = 0;
+
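+	/* The tail call counter is shared across the whole bpf2bpf call
+	 * hierarchy, so the tail calls triggered below are limited to
+	 * MAX_TAIL_CALL_CNT (33) in total, which is what the expected
+	 * retval of 33 checks.
+	 */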
+ subprog_tail0(skb);
+ subprog_tail1(skb);
+
+ __sink(ret);
+ return (count1 << 16) | count0;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c
new file mode 100644
index 000000000000..a7fb91cb05b7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+int classifier_0(struct __sk_buff *skb);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __array(values, void (void));
+} jmp_table0 SEC(".maps") = {
+ .values = {
+ [0] = (void *) &classifier_0,
+ },
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __array(values, void (void));
+} jmp_table1 SEC(".maps") = {
+ .values = {
+ [0] = (void *) &classifier_0,
+ },
+};
+
+int count = 0;
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb, void *jmp_table)
+{
+ bpf_tail_call_static(skb, jmp_table, 0);
+ return 0;
+}
+
+__auxiliary
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
+{
+ count++;
+ subprog_tail(skb, &jmp_table0);
+ subprog_tail(skb, &jmp_table1);
+ return count;
+}
+
+__success
+__retval(33)
+SEC("tc")
+int tailcall_bpf2bpf_hierarchy_3(struct __sk_buff *skb)
+{
+ int ret = 0;
+
+ bpf_tail_call_static(skb, &jmp_table0, 0);
+
+ __sink(ret);
+ return ret;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c
new file mode 100644
index 000000000000..c87f9ca982d3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Leon Hwang */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int count = 0;
+
+static __noinline
+int subprog_tail(void *ctx)
+{
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
+
+SEC("fentry/dummy")
+int BPF_PROG(fentry, struct sk_buff *skb)
+{
+ count++;
+ subprog_tail(ctx);
+ subprog_tail(ctx);
+
+ return 0;
+}
+
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_fail.c b/tools/testing/selftests/bpf/progs/tailcall_fail.c
new file mode 100644
index 000000000000..bc77921d2bb0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_fail.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+extern void bpf_rcu_read_lock(void) __ksym;
+extern void bpf_rcu_read_unlock(void) __ksym;
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+
+private(A) struct bpf_spin_lock lock;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 3);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_tail_call_spin_lock(struct __sk_buff *ctx)
+{
+ bpf_spin_lock(&lock);
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("tail_call cannot be used inside bpf_rcu_read_lock-ed region")
+int reject_tail_call_rcu_lock(struct __sk_buff *ctx)
+{
+ bpf_rcu_read_lock();
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("tail_call cannot be used inside bpf_preempt_disable-ed region")
+int reject_tail_call_preempt_lock(struct __sk_buff *ctx)
+{
+ bpf_guard_preempt();
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("tail_call would lead to reference leak")
+int reject_tail_call_ref(struct __sk_buff *ctx)
+{
+ struct foo { int i; } *p;
+
+ p = bpf_obj_new(typeof(*p));
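+	/* p is still holding a reference at the tail call below, which the
+	 * verifier reports as a reference leak.
+	 */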
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_freplace.c b/tools/testing/selftests/bpf/progs/tailcall_freplace.c
new file mode 100644
index 000000000000..6713b809df44
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_freplace.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int count = 0;
+
+SEC("freplace")
+int entry_freplace(struct __sk_buff *skb)
+{
+ count++;
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ return count;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_poke.c b/tools/testing/selftests/bpf/progs/tailcall_poke.c
new file mode 100644
index 000000000000..c78b94b75e83
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_poke.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
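+/* The userspace side of this test is expected to repeatedly update
+ * jmp_table[0] between call1 and call2 while 'test' is attached,
+ * exercising tail call target poking.
+ */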
+SEC("?fentry/bpf_fentry_test1")
+int BPF_PROG(test, int a)
+{
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(call1, int a)
+{
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(call2, int a)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_common.h b/tools/testing/selftests/bpf/progs/task_kfunc_common.h
new file mode 100644
index 000000000000..e9c4fea7a4bb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_common.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _TASK_KFUNC_COMMON_H
+#define _TASK_KFUNC_COMMON_H
+
+#include <errno.h>
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct __tasks_kfunc_map_value {
+ struct task_struct __kptr * task;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct __tasks_kfunc_map_value);
+ __uint(max_entries, 1);
+} __tasks_kfunc_map SEC(".maps");
+
+struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
+struct task_struct *bpf_task_from_vpid(s32 vpid) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+static inline struct __tasks_kfunc_map_value *tasks_kfunc_map_value_lookup(struct task_struct *p)
+{
+ s32 pid;
+ long status;
+
+ status = bpf_probe_read_kernel(&pid, sizeof(pid), &p->pid);
+ if (status)
+ return NULL;
+
+ return bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
+}
+
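+/* Acquire a reference on p and stash it in __tasks_kfunc_map keyed by its
+ * pid; the map takes ownership of the reference via bpf_kptr_xchg().
+ */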
+static inline int tasks_kfunc_map_insert(struct task_struct *p)
+{
+ struct __tasks_kfunc_map_value local, *v;
+ long status;
+ struct task_struct *acquired, *old;
+ s32 pid;
+
+ status = bpf_probe_read_kernel(&pid, sizeof(pid), &p->pid);
+ if (status)
+ return status;
+
+ local.task = NULL;
+ status = bpf_map_update_elem(&__tasks_kfunc_map, &pid, &local, BPF_NOEXIST);
+ if (status)
+ return status;
+
+ v = bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
+ if (!v) {
+ bpf_map_delete_elem(&__tasks_kfunc_map, &pid);
+ return -ENOENT;
+ }
+
+ acquired = bpf_task_acquire(p);
+ if (!acquired)
+ return -ENOENT;
+
+ old = bpf_kptr_xchg(&v->task, acquired);
+ if (old) {
+ bpf_task_release(old);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+#endif /* _TASK_KFUNC_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
new file mode 100644
index 000000000000..4c07ea193f72
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+#include "task_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(task_newtask,
+ * TP_PROTO(struct task_struct *p, u64 clone_flags)
+ */
+
+static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *task)
+{
+ int status;
+
+ status = tasks_kfunc_map_insert(task);
+ if (status)
+ return NULL;
+
+ return tasks_kfunc_map_value_lookup(task);
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+ struct __tasks_kfunc_map_value *v;
+
+ v = insert_lookup_task(task);
+ if (!v)
+ return 0;
+
+ /* Can't invoke bpf_task_acquire() on an untrusted pointer. */
+ acquired = bpf_task_acquire(v->task);
+ if (!acquired)
+ return 0;
+
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("arg#0 pointer type STRUCT task_struct must point")
+int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired, *stack_task = (struct task_struct *)&clone_flags;
+
+ /* Can't invoke bpf_task_acquire() on a random frame pointer. */
+ acquired = bpf_task_acquire((struct task_struct *)&stack_task);
+ if (!acquired)
+ return 0;
+
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("kretprobe/free_task")
+__failure __msg("calling kernel function bpf_task_acquire is not allowed")
+int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ /* Can't call bpf_task_acquire() or bpf_task_release() in an untrusted prog. */
+ acquired = bpf_task_acquire(task);
+ if (!acquired)
+ return 0;
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("kretprobe/free_task")
+__failure __msg("calling kernel function bpf_task_acquire is not allowed")
+int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe_rcu, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ bpf_rcu_read_lock();
+ if (!task) {
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+ /* Can't call bpf_task_acquire() or bpf_task_release() in an untrusted prog. */
+ acquired = bpf_task_acquire(task);
+ if (acquired)
+ bpf_task_release(acquired);
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ /* Can't invoke bpf_task_acquire() on a NULL pointer. */
+ acquired = bpf_task_acquire(NULL);
+ if (!acquired)
+ return 0;
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Unreleased reference")
+int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_acquire(task);
+
+ /* Acquired task is never released. */
+ __sink(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Unreleased reference")
+int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *kptr;
+ struct __tasks_kfunc_map_value *v;
+
+ v = insert_lookup_task(task);
+ if (!v)
+ return 0;
+
+ kptr = bpf_kptr_xchg(&v->task, NULL);
+ if (!kptr)
+ return 0;
+
+ /* Kptr retrieved from map is never released. */
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(task_kfunc_acquire_release_no_null_check, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_acquire(task);
+ /* Can't invoke bpf_task_release() on an acquired task without a NULL check. */
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags)
+{
+ struct __tasks_kfunc_map_value *v;
+
+ v = insert_lookup_task(task);
+ if (!v)
+ return 0;
+
+ /* Can't invoke bpf_task_release() on an untrusted pointer. */
+ bpf_task_release(v->task);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("arg#0 pointer type STRUCT task_struct must point")
+int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired = (struct task_struct *)&clone_flags;
+
+ /* Cannot release random frame pointer. */
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags)
+{
+ struct __tasks_kfunc_map_value local, *v;
+ long status;
+ struct task_struct *acquired, *old;
+ s32 pid;
+
+ status = bpf_probe_read_kernel(&pid, sizeof(pid), &task->pid);
+ if (status)
+ return 0;
+
+ local.task = NULL;
+ status = bpf_map_update_elem(&__tasks_kfunc_map, &pid, &local, BPF_NOEXIST);
+ if (status)
+ return status;
+
+ v = bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
+ if (!v)
+ return -ENOENT;
+
+ acquired = bpf_task_acquire(task);
+ if (!acquired)
+ return -EEXIST;
+
+ old = bpf_kptr_xchg(&v->task, acquired);
+
+ /* old cannot be passed to bpf_task_release() without a NULL check. */
+ bpf_task_release(old);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("release kernel function bpf_task_release expects")
+int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_flags)
+{
+ /* Cannot release trusted task pointer which was not acquired. */
+ bpf_task_release(task);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_from_pid(task->pid);
+
+ /* Releasing bpf_task_from_pid() lookup without a NULL check. */
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(task_kfunc_from_vpid_no_null_check, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_from_vpid(task->pid);
+
+ /* Releasing bpf_task_from_vpid() lookup without a NULL check. */
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("lsm/task_free")
+__failure __msg("R1 must be a rcu pointer")
+int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task)
+{
+ struct task_struct *acquired;
+
+	/* The argument of the lsm task_free hook is untrusted. */
+ acquired = bpf_task_acquire(task);
+ if (!acquired)
+ return 0;
+
+ bpf_task_release(acquired);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("access beyond the end of member comm")
+int BPF_PROG(task_access_comm1, struct task_struct *task, u64 clone_flags)
+{
+ bpf_strncmp(task->comm, 17, "foo");
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("access beyond the end of member comm")
+int BPF_PROG(task_access_comm2, struct task_struct *task, u64 clone_flags)
+{
+ bpf_strncmp(task->comm + 1, 16, "foo");
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("write into memory")
+int BPF_PROG(task_access_comm3, struct task_struct *task, u64 clone_flags)
+{
+ bpf_probe_read_kernel(task->comm, 16, task->comm);
+ return 0;
+}
+
+SEC("fentry/__set_task_comm")
+__failure __msg("R1 type=ptr_ expected")
+int BPF_PROG(task_access_comm4, struct task_struct *task, const char *buf, bool exec)
+{
+ /*
+ * task->comm is a legacy ptr_to_btf_id. The verifier cannot guarantee
+ * its safety. Hence it cannot be accessed with normal load insns.
+ */
+ bpf_strncmp(task->comm, 16, "foo");
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("R1 must be referenced or trusted")
+int BPF_PROG(task_kfunc_release_in_map, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *local;
+ struct __tasks_kfunc_map_value *v;
+
+ if (tasks_kfunc_map_insert(task))
+ return 0;
+
+ v = tasks_kfunc_map_value_lookup(task);
+ if (!v)
+ return 0;
+
+ bpf_rcu_read_lock();
+ local = v->task;
+ if (!local) {
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+ /* Can't release a kptr that's still stored in a map. */
+ bpf_task_release(local);
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c
new file mode 100644
index 000000000000..5fb4fc19d26a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "../bpf_experimental.h"
+#include "task_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+int err, pid;
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(task_newtask,
+ * TP_PROTO(struct task_struct *p, u64 clone_flags)
+ */
+
+struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
+
+struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak;
+/* The two-param bpf_task_acquire doesn't exist */
+struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak;
+/* Incorrect type for first param */
+struct task_struct *bpf_task_acquire___three(void *ctx) __ksym __weak;
+
+void invalid_kfunc(void) __ksym __weak;
+void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;
+
+static bool is_test_kfunc_task(void)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+
+ return pid == cur_pid;
+}
+
+static int test_acquire_release(struct task_struct *task)
+{
+ struct task_struct *acquired = NULL;
+
+ if (!bpf_ksym_exists(bpf_task_acquire)) {
+ err = 3;
+ return 0;
+ }
+ if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) {
+ err = 4;
+ return 0;
+ }
+ if (bpf_ksym_exists(invalid_kfunc)) {
+ /* the verifier's dead code elimination should remove this */
+ err = 5;
+ asm volatile ("goto -1"); /* for (;;); */
+ }
+
+ acquired = bpf_task_acquire(task);
+ if (acquired)
+ bpf_task_release(acquired);
+ else
+ err = 6;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_kfunc_flavor_relo, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired = NULL;
+ int fake_ctx = 42;
+
+ if (bpf_ksym_exists(bpf_task_acquire___one)) {
+ acquired = bpf_task_acquire___one(task);
+ } else if (bpf_ksym_exists(bpf_task_acquire___two)) {
+ /* Here, bpf_object__resolve_ksym_func_btf_id's find_ksym_btf_id
+ * call will find vmlinux's bpf_task_acquire, but subsequent
+ * bpf_core_types_are_compat will fail
+ */
+ acquired = bpf_task_acquire___two(task, &fake_ctx);
+ err = 3;
+ return 0;
+ } else if (bpf_ksym_exists(bpf_task_acquire___three)) {
+ /* bpf_core_types_are_compat will fail similarly to above case */
+ acquired = bpf_task_acquire___three(&fake_ctx);
+ err = 4;
+ return 0;
+ }
+
+ if (acquired)
+ bpf_task_release(acquired);
+ else
+ err = 5;
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_kfunc_flavor_relo_not_found, struct task_struct *task, u64 clone_flags)
+{
+	/* Neither symbol should successfully resolve.
+	 * Success or failure of one ___flavor should not affect the others.
+	 */
+ if (bpf_ksym_exists(bpf_task_acquire___two))
+ err = 1;
+ else if (bpf_ksym_exists(bpf_task_acquire___three))
+ err = 2;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)
+{
+ if (!is_test_kfunc_task())
+ return 0;
+
+ return test_acquire_release(task);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags)
+{
+ if (!is_test_kfunc_task())
+ return 0;
+
+ return test_acquire_release(bpf_get_current_task_btf());
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone_flags)
+{
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = tasks_kfunc_map_insert(task);
+ if (status)
+ err = 1;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *kptr, *acquired;
+ struct __tasks_kfunc_map_value *v, *local;
+ int refcnt, refcnt_after_drop;
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = tasks_kfunc_map_insert(task);
+ if (status) {
+ err = 1;
+ return 0;
+ }
+
+ v = tasks_kfunc_map_value_lookup(task);
+ if (!v) {
+ err = 2;
+ return 0;
+ }
+
+ kptr = bpf_kptr_xchg(&v->task, NULL);
+ if (!kptr) {
+ err = 3;
+ return 0;
+ }
+
+ local = bpf_obj_new(typeof(*local));
+ if (!local) {
+ err = 4;
+ bpf_task_release(kptr);
+ return 0;
+ }
+
+ kptr = bpf_kptr_xchg(&local->task, kptr);
+ if (kptr) {
+ err = 5;
+ bpf_obj_drop(local);
+ bpf_task_release(kptr);
+ return 0;
+ }
+
+ kptr = bpf_kptr_xchg(&local->task, NULL);
+ if (!kptr) {
+ err = 6;
+ bpf_obj_drop(local);
+ return 0;
+ }
+
+ /* Stash a copy into local kptr and check if it is released recursively */
+ acquired = bpf_task_acquire(kptr);
+ if (!acquired) {
+ err = 7;
+ bpf_obj_drop(local);
+ bpf_task_release(kptr);
+ return 0;
+ }
+ bpf_probe_read_kernel(&refcnt, sizeof(refcnt), &acquired->rcu_users);
+
+ acquired = bpf_kptr_xchg(&local->task, acquired);
+ if (acquired) {
+ err = 8;
+ bpf_obj_drop(local);
+ bpf_task_release(kptr);
+ bpf_task_release(acquired);
+ return 0;
+ }
+
+ bpf_obj_drop(local);
+
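+	/* Dropping the local object should have recursively released the
+	 * stashed task reference, so rcu_users should have gone down by
+	 * exactly one.
+	 */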
+ bpf_probe_read_kernel(&refcnt_after_drop, sizeof(refcnt_after_drop), &kptr->rcu_users);
+ if (refcnt != refcnt_after_drop + 1) {
+ err = 9;
+ bpf_task_release(kptr);
+ return 0;
+ }
+
+ bpf_task_release(kptr);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_map_acquire_release, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *kptr;
+ struct __tasks_kfunc_map_value *v;
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = tasks_kfunc_map_insert(task);
+ if (status) {
+ err = 1;
+ return 0;
+ }
+
+ v = tasks_kfunc_map_value_lookup(task);
+ if (!v) {
+ err = 2;
+ return 0;
+ }
+
+ bpf_rcu_read_lock();
+ kptr = v->task;
+ if (!kptr) {
+ err = 3;
+ } else {
+ kptr = bpf_task_acquire(kptr);
+ if (!kptr)
+ err = 4;
+ else
+ bpf_task_release(kptr);
+ }
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *current, *acquired;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ current = bpf_get_current_task_btf();
+ acquired = bpf_task_acquire(current);
+ if (acquired)
+ bpf_task_release(acquired);
+ else
+ err = 1;
+
+ return 0;
+}
+
+static void lookup_compare_pid(const struct task_struct *p)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_from_pid(p->pid);
+ if (!acquired) {
+ err = 1;
+ return;
+ }
+
+ if (acquired->pid != p->pid)
+ err = 2;
+ bpf_task_release(acquired);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
+{
+ if (!is_test_kfunc_task())
+ return 0;
+
+ lookup_compare_pid(task);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags)
+{
+ if (!is_test_kfunc_task())
+ return 0;
+
+ lookup_compare_pid(bpf_get_current_task_btf());
+ return 0;
+}
+
+static int is_pid_lookup_valid(s32 pid)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_from_pid(pid);
+ if (acquired) {
+ bpf_task_release(acquired);
+ return 1;
+ }
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags)
+{
+ if (!is_test_kfunc_task())
+ return 0;
+
+ bpf_strncmp(task->comm, 12, "foo");
+ bpf_strncmp(task->comm, 16, "foo");
+ bpf_strncmp(&task->comm[8], 4, "foo");
+
+ if (is_pid_lookup_valid(-1)) {
+ err = 1;
+ return 0;
+ }
+
+ if (is_pid_lookup_valid(0xcafef00d)) {
+ err = 2;
+ return 0;
+ }
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+	/* task->group_leader is listed as a trusted, non-NULL field of task_struct. */
+ acquired = bpf_task_acquire(task->group_leader);
+ if (acquired)
+ bpf_task_release(acquired);
+ else
+ err = 1;
+
+
+ return 0;
+}
+
+SEC("syscall")
+int test_task_from_vpid_current(const void *ctx)
+{
+ struct task_struct *current, *v_task;
+
+ v_task = bpf_task_from_vpid(1);
+ if (!v_task) {
+ err = 1;
+ return 0;
+ }
+
+ current = bpf_get_current_task_btf();
+
+ /* The current process should be the init process (pid 1) in the new pid namespace. */
+ if (current != v_task)
+ err = 2;
+
+ bpf_task_release(v_task);
+ return 0;
+}
+
+SEC("syscall")
+int test_task_from_vpid_invalid(const void *ctx)
+{
+ struct task_struct *v_task;
+
+ v_task = bpf_task_from_vpid(-1);
+ if (v_task) {
+ err = 1;
+ goto err;
+ }
+
+ /* There should be only one process (current process) in the new pid namespace. */
+ v_task = bpf_task_from_vpid(2);
+ if (v_task) {
+ err = 2;
+ goto err;
+ }
+
+ v_task = bpf_task_from_vpid(9999);
+ if (v_task) {
+ err = 3;
+ goto err;
+ }
+
+ return 0;
+err:
+ bpf_task_release(v_task);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_local_storage.c b/tools/testing/selftests/bpf/progs/task_local_storage.c
new file mode 100644
index 000000000000..80a0a20db88d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_local_storage.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} enter_id SEC(".maps");
+
+#define MAGIC_VALUE 0xabcd1234
+
+pid_t target_pid = 0;
+int mismatch_cnt = 0;
+int enter_cnt = 0;
+int exit_cnt = 0;
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ ptr = bpf_task_storage_get(&enter_id, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return 0;
+
+ __sync_fetch_and_add(&enter_cnt, 1);
+ *ptr = MAGIC_VALUE + enter_cnt;
+
+ return 0;
+}
+
+SEC("tp_btf/sys_exit")
+int BPF_PROG(on_exit, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ ptr = bpf_task_storage_get(&enter_id, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return 0;
+
+ __sync_fetch_and_add(&exit_cnt, 1);
+ if (*ptr != MAGIC_VALUE + exit_cnt)
+ __sync_fetch_and_add(&mismatch_cnt, 1);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c b/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c
new file mode 100644
index 000000000000..41d88ed222ff
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, __u64);
+} task_storage SEC(".maps");
+
+int run_count = 0;
+int valid_ptr_count = 0;
+int null_ptr_count = 0;
+
+SEC("fentry/exit_creds")
+int BPF_PROG(trace_exit_creds, struct task_struct *task)
+{
+ __u64 *ptr;
+
+ ptr = bpf_task_storage_get(&task_storage, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ __sync_fetch_and_add(&valid_ptr_count, 1);
+ else
+ __sync_fetch_and_add(&null_ptr_count, 1);
+
+ __sync_fetch_and_add(&run_count, 1);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_ls_recursion.c b/tools/testing/selftests/bpf/progs/task_ls_recursion.c
new file mode 100644
index 000000000000..f1853c38aada
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_ls_recursion.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#ifndef EBUSY
+#define EBUSY 16
+#endif
+
+char _license[] SEC("license") = "GPL";
+int nr_del_errs = 0;
+int test_pid = 0;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_b SEC(".maps");
+
+SEC("fentry/bpf_local_storage_update")
+int BPF_PROG(on_update)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ long *ptr;
+
+ if (!test_pid || task->pid != test_pid)
+ return 0;
+
+ ptr = bpf_task_storage_get(&map_a, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+	/* ptr will not be NULL when this program is triggered by the
+	 * bpf_task_storage_get(&map_b, ..., F_CREATE) in BPF_PROG(on_enter)
+	 * below, because the value can already be found in map_a and the
+	 * kernel does not need to acquire any spin_lock to return it.
+	 */
+ if (ptr) {
+ int err;
+
+ *ptr += 1;
+ err = bpf_task_storage_delete(&map_a, task);
+ if (err == -EBUSY)
+ nr_del_errs++;
+ }
+
+	/* This will still fail because map_b is empty and this
+	 * BPF_PROG(on_update) cannot acquire the percpu busy lock (it is
+	 * already held), so a potential deadlock is detected and the new
+	 * storage cannot be created.
+	 */
+ ptr = bpf_task_storage_get(&map_b, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr += 1;
+
+ return 0;
+}
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ if (!test_pid || task->pid != test_pid)
+ return 0;
+
+ ptr = bpf_task_storage_get(&map_a, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr && !*ptr)
+ *ptr = 200;
+
+ ptr = bpf_task_storage_get(&map_b, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr && !*ptr)
+ *ptr = 100;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_ls_uptr.c b/tools/testing/selftests/bpf/progs/task_ls_uptr.c
new file mode 100644
index 000000000000..ddbe11b46eef
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_ls_uptr.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "uptr_test_common.h"
+
+struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct value_type);
+} datamap SEC(".maps");
+
+pid_t target_pid = 0;
+pid_t parent_pid = 0;
+
+SEC("tp_btf/sys_enter")
+int on_enter(__u64 *ctx)
+{
+ struct task_struct *task, *data_task;
+ struct value_type *ptr;
+ struct user_data *udata;
+ struct cgroup *cgrp;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ data_task = bpf_task_from_pid(parent_pid);
+ if (!data_task)
+ return 0;
+
+ ptr = bpf_task_storage_get(&datamap, data_task, 0, 0);
+ bpf_task_release(data_task);
+ if (!ptr)
+ return 0;
+
+ cgrp = bpf_kptr_xchg(&ptr->cgrp, NULL);
+ if (cgrp) {
+ int lvl = cgrp->level;
+
+ bpf_cgroup_release(cgrp);
+ return lvl;
+ }
+
+ udata = ptr->udata;
+ if (!udata || udata->result)
+ return 0;
+ udata->result = MAGIC_VALUE + udata->a + udata->b;
+
+ udata = ptr->nested.udata;
+ if (udata && !udata->nested_result)
+ udata->nested_result = udata->result;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c b/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c
new file mode 100644
index 000000000000..986829aaf73a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#ifndef EBUSY
+#define EBUSY 16
+#endif
+
+extern bool CONFIG_PREEMPTION __kconfig __weak;
+int nr_get_errs = 0;
+int nr_del_errs = 0;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} task_storage SEC(".maps");
+
+SEC("lsm.s/socket_post_create")
+int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
+ int protocol, int kern)
+{
+ struct task_struct *task;
+ int ret, zero = 0;
+ int *value;
+
+ if (!CONFIG_PREEMPTION)
+ return 0;
+
+ task = bpf_get_current_task_btf();
+ value = bpf_task_storage_get(&task_storage, task, &zero,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!value)
+ __sync_fetch_and_add(&nr_get_errs, 1);
+
+ ret = bpf_task_storage_delete(&task_storage,
+ bpf_get_current_task_btf());
+ if (ret == -EBUSY)
+ __sync_fetch_and_add(&nr_del_errs, 1);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
new file mode 100644
index 000000000000..fe6249d99b31
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+__noinline
+int subprog_tc(struct __sk_buff *skb)
+{
+ int ret = 1;
+
+ __sink(skb);
+ __sink(ret);
+ /* let verifier know that 'subprog_tc' can change pointers to skb->data */
+ bpf_skb_change_proto(skb, 0, 0);
+ return ret;
+}
+
+SEC("tc")
+int entry_tc(struct __sk_buff *skb)
+{
+ return subprog_tc(skb);
+}
+
+char __license[] SEC("license") = "GPL";
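
The entry_tc/subprog_tc pair above is a plain SEC("tc") program; one way to attach it from userspace is libbpf's bpf_tc API. A minimal sketch, assuming a bpftool-generated skeleton named tc_bpf2bpf.skel.h and an interface name supplied by the caller:

	#include <net/if.h>
	#include <bpf/libbpf.h>
	#include "tc_bpf2bpf.skel.h"	/* assumed skeleton name */

	static int attach_tc_ingress(struct tc_bpf2bpf *skel, const char *dev)
	{
		DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook,
				    .ifindex = if_nametoindex(dev),
				    .attach_point = BPF_TC_INGRESS);
		DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts,
				    .prog_fd = bpf_program__fd(skel->progs.entry_tc));
		int err;

		/* create the clsact qdisc if it does not exist yet */
		err = bpf_tc_hook_create(&hook);
		if (err && err != -EEXIST)
			return err;
		return bpf_tc_attach(&hook, &opts);
	}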
diff --git a/tools/testing/selftests/bpf/progs/tc_dummy.c b/tools/testing/selftests/bpf/progs/tc_dummy.c
new file mode 100644
index 000000000000..69a3d0dc8787
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tc_dummy.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_legacy.h"
+
+SEC("tc")
+int entry(struct __sk_buff *skb)
+{
+ return 1;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c b/tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c
new file mode 100644
index 000000000000..0016c90e9c13
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops")
+__u32 BPF_PROG(incompl_cong_ops_ssthresh, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_ssthresh;
+}
+
+SEC("struct_ops")
+__u32 BPF_PROG(incompl_cong_ops_undo_cwnd, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_cwnd;
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops incompl_cong_ops = {
+	/* Intentionally leave out both cong_avoid() and cong_control(),
+	 * one of which is required, so these ops are incomplete.
+	 */
+ .ssthresh = (void *)incompl_cong_ops_ssthresh,
+ .undo_cwnd = (void *)incompl_cong_ops_undo_cwnd,
+ .name = "bpf_incompl_ops",
+};
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_kfunc.c b/tools/testing/selftests/bpf/progs/tcp_ca_kfunc.c
new file mode 100644
index 000000000000..f95862f570b7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_kfunc.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+
+extern void bbr_init(struct sock *sk) __ksym;
+extern void bbr_main(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs) __ksym;
+extern u32 bbr_sndbuf_expand(struct sock *sk) __ksym;
+extern u32 bbr_undo_cwnd(struct sock *sk) __ksym;
+extern void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event) __ksym;
+extern u32 bbr_ssthresh(struct sock *sk) __ksym;
+extern u32 bbr_min_tso_segs(struct sock *sk) __ksym;
+extern void bbr_set_state(struct sock *sk, u8 new_state) __ksym;
+
+extern void dctcp_init(struct sock *sk) __ksym;
+extern void dctcp_update_alpha(struct sock *sk, u32 flags) __ksym;
+extern void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) __ksym;
+extern u32 dctcp_ssthresh(struct sock *sk) __ksym;
+extern u32 dctcp_cwnd_undo(struct sock *sk) __ksym;
+extern void dctcp_state(struct sock *sk, u8 new_state) __ksym;
+
+extern void cubictcp_init(struct sock *sk) __ksym;
+extern u32 cubictcp_recalc_ssthresh(struct sock *sk) __ksym;
+extern void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) __ksym;
+extern void cubictcp_state(struct sock *sk, u8 new_state) __ksym;
+extern void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) __ksym;
+extern void cubictcp_acked(struct sock *sk, const struct ack_sample *sample) __ksym;
+
+SEC("struct_ops")
+void BPF_PROG(init, struct sock *sk)
+{
+ bbr_init(sk);
+ dctcp_init(sk);
+ cubictcp_init(sk);
+}
+
+SEC("struct_ops")
+void BPF_PROG(in_ack_event, struct sock *sk, u32 flags)
+{
+ dctcp_update_alpha(sk, flags);
+}
+
+SEC("struct_ops")
+void BPF_PROG(cong_control, struct sock *sk, u32 ack, int flag, const struct rate_sample *rs)
+{
+ bbr_main(sk, ack, flag, rs);
+}
+
+SEC("struct_ops")
+void BPF_PROG(cong_avoid, struct sock *sk, u32 ack, u32 acked)
+{
+ cubictcp_cong_avoid(sk, ack, acked);
+}
+
+SEC("struct_ops")
+u32 BPF_PROG(sndbuf_expand, struct sock *sk)
+{
+ return bbr_sndbuf_expand(sk);
+}
+
+SEC("struct_ops")
+u32 BPF_PROG(undo_cwnd, struct sock *sk)
+{
+ bbr_undo_cwnd(sk);
+ return dctcp_cwnd_undo(sk);
+}
+
+SEC("struct_ops")
+void BPF_PROG(cwnd_event, struct sock *sk, enum tcp_ca_event event)
+{
+ bbr_cwnd_event(sk, event);
+ dctcp_cwnd_event(sk, event);
+ cubictcp_cwnd_event(sk, event);
+}
+
+SEC("struct_ops")
+u32 BPF_PROG(ssthresh, struct sock *sk)
+{
+ bbr_ssthresh(sk);
+ dctcp_ssthresh(sk);
+ return cubictcp_recalc_ssthresh(sk);
+}
+
+SEC("struct_ops")
+u32 BPF_PROG(min_tso_segs, struct sock *sk)
+{
+ return bbr_min_tso_segs(sk);
+}
+
+SEC("struct_ops")
+void BPF_PROG(set_state, struct sock *sk, u8 new_state)
+{
+ bbr_set_state(sk, new_state);
+ dctcp_state(sk, new_state);
+ cubictcp_state(sk, new_state);
+}
+
+SEC("struct_ops")
+void BPF_PROG(pkts_acked, struct sock *sk, const struct ack_sample *sample)
+{
+ cubictcp_acked(sk, sample);
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops tcp_ca_kfunc = {
+ .init = (void *)init,
+ .in_ack_event = (void *)in_ack_event,
+ .cong_control = (void *)cong_control,
+ .cong_avoid = (void *)cong_avoid,
+ .sndbuf_expand = (void *)sndbuf_expand,
+ .undo_cwnd = (void *)undo_cwnd,
+ .cwnd_event = (void *)cwnd_event,
+ .ssthresh = (void *)ssthresh,
+ .min_tso_segs = (void *)min_tso_segs,
+ .set_state = (void *)set_state,
+ .pkts_acked = (void *)pkts_acked,
+ .name = "tcp_ca_kfunc",
+};
+
+char _license[] SEC("license") = "GPL";
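
The SEC(".struct_ops") map above registers a congestion control called "tcp_ca_kfunc" once it is attached. A hedged sketch of registering it and selecting it on an existing TCP socket (skeleton name assumed; the actual test may only load the object to exercise kfunc resolution):

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <bpf/libbpf.h>
	#include "tcp_ca_kfunc.skel.h"	/* assumed skeleton name */

	static int use_bpf_ca(int sock_fd)
	{
		struct tcp_ca_kfunc *skel;
		struct bpf_link *link;
		int err;

		skel = tcp_ca_kfunc__open_and_load();
		if (!skel)
			return -1;
		link = bpf_map__attach_struct_ops(skel->maps.tcp_ca_kfunc);
		if (!link) {
			tcp_ca_kfunc__destroy(skel);
			return -1;
		}
		/* the CA is now selectable by the .name given in the struct_ops map */
		err = setsockopt(sock_fd, IPPROTO_TCP, TCP_CONGESTION,
				 "tcp_ca_kfunc", sizeof("tcp_ca_kfunc") - 1);
		/* keep link and skel alive for as long as sockets use this CA */
		return err;
	}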
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_unsupp_cong_op.c b/tools/testing/selftests/bpf/progs/tcp_ca_unsupp_cong_op.c
new file mode 100644
index 000000000000..54f916a931c6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_unsupp_cong_op.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops")
+size_t BPF_PROG(unsupp_cong_op_get_info, struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info)
+{
+ return 0;
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops unsupp_cong_op = {
+ .get_info = (void *)unsupp_cong_op_get_info,
+ .name = "bpf_unsupp_op",
+};
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_update.c b/tools/testing/selftests/bpf/progs/tcp_ca_update.c
new file mode 100644
index 000000000000..e4bd82bc0d01
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_update.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int ca1_cnt = 0;
+int ca2_cnt = 0;
+
+SEC("struct_ops")
+void BPF_PROG(ca_update_1_init, struct sock *sk)
+{
+ ca1_cnt++;
+}
+
+SEC("struct_ops")
+void BPF_PROG(ca_update_2_init, struct sock *sk)
+{
+ ca2_cnt++;
+}
+
+SEC("struct_ops")
+void BPF_PROG(ca_update_cong_control, struct sock *sk,
+ const struct rate_sample *rs)
+{
+}
+
+SEC("struct_ops")
+__u32 BPF_PROG(ca_update_ssthresh, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_ssthresh;
+}
+
+SEC("struct_ops")
+__u32 BPF_PROG(ca_update_undo_cwnd, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_cwnd;
+}
+
+SEC(".struct_ops.link")
+struct tcp_congestion_ops ca_update_1 = {
+ .init = (void *)ca_update_1_init,
+ .cong_control = (void *)ca_update_cong_control,
+ .ssthresh = (void *)ca_update_ssthresh,
+ .undo_cwnd = (void *)ca_update_undo_cwnd,
+ .name = "tcp_ca_update",
+};
+
+SEC(".struct_ops.link")
+struct tcp_congestion_ops ca_update_2 = {
+ .init = (void *)ca_update_2_init,
+ .cong_control = (void *)ca_update_cong_control,
+ .ssthresh = (void *)ca_update_ssthresh,
+ .undo_cwnd = (void *)ca_update_undo_cwnd,
+ .name = "tcp_ca_update",
+};
+
+SEC(".struct_ops.link")
+struct tcp_congestion_ops ca_wrong = {
+ .cong_control = (void *)ca_update_cong_control,
+ .ssthresh = (void *)ca_update_ssthresh,
+ .undo_cwnd = (void *)ca_update_undo_cwnd,
+ .name = "tcp_ca_wrong",
+};
+
+SEC(".struct_ops")
+struct tcp_congestion_ops ca_no_link = {
+ .cong_control = (void *)ca_update_cong_control,
+ .ssthresh = (void *)ca_update_ssthresh,
+ .undo_cwnd = (void *)ca_update_undo_cwnd,
+ .name = "tcp_ca_no_link",
+};
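
ca_update_1 and ca_update_2 share the name "tcp_ca_update" so the attached struct_ops can be swapped in place; ca_wrong and ca_no_link exist to exercise the failure paths. A hedged sketch of the in-place update step with libbpf (skeleton name assumed):

	#include <bpf/libbpf.h>
	#include "tcp_ca_update.skel.h"	/* assumed skeleton name */

	static int switch_ca(struct tcp_ca_update *skel)
	{
		struct bpf_link *link;
		int err;

		link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
		if (!link)
			return -1;

		/* replace the registered ops with ca_update_2 without detaching;
		 * both maps must carry the same .name for this to succeed
		 */
		err = bpf_link__update_map(link, skel->maps.ca_update_2);

		bpf_link__destroy(link);
		return err;
	}

After a successful update, new connections selecting "tcp_ca_update" run ca_update_2_init, which is what the ca1_cnt/ca2_cnt counters are meant to distinguish.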
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c b/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c
new file mode 100644
index 000000000000..a58b5194fc89
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define USEC_PER_SEC 1000000UL
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+static unsigned int tcp_left_out(const struct tcp_sock *tp)
+{
+ return tp->sacked_out + tp->lost_out;
+}
+
+static unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
+{
+ return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
+}
+
+SEC("struct_ops")
+void BPF_PROG(write_sk_pacing_init, struct sock *sk)
+{
+#ifdef ENABLE_ATOMICS_TESTS
+ __sync_bool_compare_and_swap(&sk->sk_pacing_status, SK_PACING_NONE,
+ SK_PACING_NEEDED);
+#else
+ sk->sk_pacing_status = SK_PACING_NEEDED;
+#endif
+}
+
+SEC("struct_ops")
+void BPF_PROG(write_sk_pacing_cong_control, struct sock *sk,
+ const struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ unsigned long rate =
+ ((tp->snd_cwnd * tp->mss_cache * USEC_PER_SEC) << 3) /
+ (tp->srtt_us ?: 1U << 3);
+ sk->sk_pacing_rate = min(rate, sk->sk_max_pacing_rate);
+ tp->app_limited = (tp->delivered + tcp_packets_in_flight(tp)) ?: 1;
+}
+
+SEC("struct_ops")
+__u32 BPF_PROG(write_sk_pacing_ssthresh, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_ssthresh;
+}
+
+SEC("struct_ops")
+__u32 BPF_PROG(write_sk_pacing_undo_cwnd, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_cwnd;
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops write_sk_pacing = {
+ .init = (void *)write_sk_pacing_init,
+ .cong_control = (void *)write_sk_pacing_cong_control,
+ .ssthresh = (void *)write_sk_pacing_ssthresh,
+ .undo_cwnd = (void *)write_sk_pacing_undo_cwnd,
+ .name = "bpf_w_sk_pacing",
+};
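
A quick sanity check of the pacing arithmetic in write_sk_pacing_cong_control() above: tp->srtt_us holds the smoothed RTT in microseconds shifted left by 3, so the << 3 in the numerator cancels that scaling and the quotient is snd_cwnd * mss_cache / RTT in bytes per second. Illustrative numbers (not taken from the test itself): with snd_cwnd = 10, mss_cache = 1448 and a 10 ms smoothed RTT (srtt_us = 10000 << 3 = 80000):

	rate = ((10 * 1448 * 1000000) << 3) / 80000
	     = 115840000000 / 80000
	     = 1448000 bytes/sec (~1.4 MB/s)

The result is then clamped to sk_max_pacing_rate before being written to sk->sk_pacing_rate.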
diff --git a/tools/testing/selftests/bpf/progs/tcp_rtt.c b/tools/testing/selftests/bpf/progs/tcp_rtt.c
index 0cb3204ddb18..42c729f85524 100644
--- a/tools/testing/selftests/bpf/progs/tcp_rtt.c
+++ b/tools/testing/selftests/bpf/progs/tcp_rtt.c
@@ -3,7 +3,6 @@
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1;
struct tcp_rtt_storage {
__u32 invoked;
@@ -11,6 +10,9 @@ struct tcp_rtt_storage {
__u32 delivered;
__u32 delivered_ce;
__u32 icsk_retransmits;
+
+ __u32 mrtt_us; /* args[0] */
+ __u32 srtt; /* args[1] */
};
struct {
@@ -56,5 +58,8 @@ int _sockops(struct bpf_sock_ops *ctx)
storage->delivered_ce = tcp_sk->delivered_ce;
storage->icsk_retransmits = tcp_sk->icsk_retransmits;
+ storage->mrtt_us = ctx->args[0];
+ storage->srtt = ctx->args[1];
+
return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/test_access_variable_array.c b/tools/testing/selftests/bpf/progs/test_access_variable_array.c
new file mode 100644
index 000000000000..326b7d1f496a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_access_variable_array.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Bytedance */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+unsigned long span = 0;
+
+SEC("fentry/sched_balance_rq")
+int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq,
+ struct sched_domain *sd)
+{
+ span = sd->span[0];
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_assign_reuse.c b/tools/testing/selftests/bpf/progs/test_assign_reuse.c
new file mode 100644
index 000000000000..4f2e2321ea06
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_assign_reuse.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/pkt_cls.h>
+
+char LICENSE[] SEC("license") = "GPL";
+
+__u64 sk_cookie_seen;
+__u64 reuseport_executed;
+union {
+ struct tcphdr tcp;
+ struct udphdr udp;
+} headers;
+
+const volatile __u16 dest_port;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} sk_map SEC(".maps");
+
+SEC("sk_reuseport")
+int reuse_accept(struct sk_reuseport_md *ctx)
+{
+ reuseport_executed++;
+
+ if (ctx->ip_protocol == IPPROTO_TCP) {
+ if (ctx->data + sizeof(headers.tcp) > ctx->data_end)
+ return SK_DROP;
+
+ if (__builtin_memcmp(&headers.tcp, ctx->data, sizeof(headers.tcp)) != 0)
+ return SK_DROP;
+ } else if (ctx->ip_protocol == IPPROTO_UDP) {
+ if (ctx->data + sizeof(headers.udp) > ctx->data_end)
+ return SK_DROP;
+
+ if (__builtin_memcmp(&headers.udp, ctx->data, sizeof(headers.udp)) != 0)
+ return SK_DROP;
+ } else {
+ return SK_DROP;
+ }
+
+ sk_cookie_seen = bpf_get_socket_cookie(ctx->sk);
+ return SK_PASS;
+}
+
+SEC("sk_reuseport")
+int reuse_drop(struct sk_reuseport_md *ctx)
+{
+ reuseport_executed++;
+ sk_cookie_seen = 0;
+ return SK_DROP;
+}
+
+static int
+assign_sk(struct __sk_buff *skb)
+{
+ int zero = 0, ret = 0;
+ struct bpf_sock *sk;
+
+ sk = bpf_map_lookup_elem(&sk_map, &zero);
+ if (!sk)
+ return TC_ACT_SHOT;
+ ret = bpf_sk_assign(skb, sk, 0);
+ bpf_sk_release(sk);
+ return ret ? TC_ACT_SHOT : TC_ACT_OK;
+}
+
+static int
+maybe_assign_tcp(struct __sk_buff *skb, struct tcphdr *th)
+{
+ if (th + 1 > (void *)(long)(skb->data_end))
+ return TC_ACT_SHOT;
+
+ if (!th->syn || th->ack || th->dest != bpf_htons(dest_port))
+ return TC_ACT_OK;
+
+ __builtin_memcpy(&headers.tcp, th, sizeof(headers.tcp));
+ return assign_sk(skb);
+}
+
+static int
+maybe_assign_udp(struct __sk_buff *skb, struct udphdr *uh)
+{
+ if (uh + 1 > (void *)(long)(skb->data_end))
+ return TC_ACT_SHOT;
+
+ if (uh->dest != bpf_htons(dest_port))
+ return TC_ACT_OK;
+
+ __builtin_memcpy(&headers.udp, uh, sizeof(headers.udp));
+ return assign_sk(skb);
+}
+
+SEC("tc")
+int tc_main(struct __sk_buff *skb)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+ struct ethhdr *eth;
+
+ eth = (struct ethhdr *)(data);
+ if (eth + 1 > data_end)
+ return TC_ACT_SHOT;
+
+ if (eth->h_proto == bpf_htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)(data + sizeof(*eth));
+
+ if (iph + 1 > data_end)
+ return TC_ACT_SHOT;
+
+ if (iph->protocol == IPPROTO_TCP)
+ return maybe_assign_tcp(skb, (struct tcphdr *)(iph + 1));
+ else if (iph->protocol == IPPROTO_UDP)
+ return maybe_assign_udp(skb, (struct udphdr *)(iph + 1));
+ else
+ return TC_ACT_SHOT;
+ } else {
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + sizeof(*eth));
+
+ if (ip6h + 1 > data_end)
+ return TC_ACT_SHOT;
+
+ if (ip6h->nexthdr == IPPROTO_TCP)
+ return maybe_assign_tcp(skb, (struct tcphdr *)(ip6h + 1));
+ else if (ip6h->nexthdr == IPPROTO_UDP)
+ return maybe_assign_udp(skb, (struct udphdr *)(ip6h + 1));
+ else
+ return TC_ACT_SHOT;
+ }
+}
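
tc_main above steers matching SYNs and UDP datagrams to whichever socket sits in slot 0 of sk_map via bpf_sk_assign(), and reuse_accept/reuse_drop then run as the reuseport selection program. A hedged sketch of the userspace plumbing (skeleton name assumed; listen_fd must already have SO_REUSEPORT set, and dest_port has to be written to skel->rodata before load because it is const volatile):

	#include <sys/socket.h>
	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>
	#include "test_assign_reuse.skel.h"	/* assumed skeleton name */

	static int setup_steering(struct test_assign_reuse *skel, int listen_fd)
	{
		int prog_fd = bpf_program__fd(skel->progs.reuse_accept);
		__u64 val = listen_fd;
		__u32 key = 0;
		int err;

		/* let the reuseport group run the BPF selection program */
		err = setsockopt(listen_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
				 &prog_fd, sizeof(prog_fd));
		if (err)
			return err;

		/* expose the listener to tc_main through sk_map slot 0 */
		return bpf_map_update_elem(bpf_map__fd(skel->maps.sk_map),
					   &key, &val, BPF_ANY);
	}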
diff --git a/tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c b/tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c
new file mode 100644
index 000000000000..f548b7446218
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+int kprobe_res = 0;
+
+/**
+ * This program will be manually made sleepable on the userspace side
+ * and should thus be unattachable.
+ */
+SEC("kprobe/" SYS_PREFIX "sys_nanosleep")
+int handle_kprobe_sleepable(struct pt_regs *ctx)
+{
+ kprobe_res = 1;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c
index 8056a4c6d918..fb79e6cab932 100644
--- a/tools/testing/selftests/bpf/progs/test_attach_probe.c
+++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c
@@ -1,42 +1,183 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
-#include <linux/ptrace.h>
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include <errno.h>
+#include "bpf_misc.h"
-int kprobe_res = 0;
-int kretprobe_res = 0;
-int uprobe_res = 0;
-int uretprobe_res = 0;
+u32 dynamic_sz = 1;
+int kprobe2_res = 0;
+int kretprobe2_res = 0;
+int uprobe_byname_res = 0;
+int uretprobe_byname_res = 0;
+int uprobe_byname2_res = 0;
+int uretprobe_byname2_res = 0;
+int uprobe_byname3_sleepable_res = 0;
+int uprobe_byname3_str_sleepable_res = 0;
+int uprobe_byname3_res = 0;
+int uretprobe_byname3_sleepable_res = 0;
+int uretprobe_byname3_str_sleepable_res = 0;
+int uretprobe_byname3_res = 0;
+void *user_ptr = 0;
-SEC("kprobe/sys_nanosleep")
-int handle_kprobe(struct pt_regs *ctx)
+int bpf_copy_from_user_str(void *dst, u32, const void *, u64) __weak __ksym;
+
+SEC("ksyscall/nanosleep")
+int BPF_KSYSCALL(handle_kprobe_auto, struct __kernel_timespec *req, struct __kernel_timespec *rem)
+{
+ kprobe2_res = 11;
+ return 0;
+}
+
+SEC("kretsyscall/nanosleep")
+int BPF_KRETPROBE(handle_kretprobe_auto, int ret)
+{
+ kretprobe2_res = 22;
+ return ret;
+}
+
+SEC("uprobe")
+int handle_uprobe_ref_ctr(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("uretprobe")
+int handle_uretprobe_ref_ctr(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("uprobe")
+int handle_uprobe_byname(struct pt_regs *ctx)
+{
+ uprobe_byname_res = 5;
+ return 0;
+}
+
+/* use auto-attach format for section definition. */
+SEC("uretprobe//proc/self/exe:trigger_func2")
+int handle_uretprobe_byname(struct pt_regs *ctx)
+{
+ uretprobe_byname_res = 6;
+ return 0;
+}
+
+SEC("uprobe")
+int BPF_UPROBE(handle_uprobe_byname2, const char *pathname, const char *mode)
+{
+ char mode_buf[2] = {};
+
+ /* verify fopen mode */
+ bpf_probe_read_user(mode_buf, sizeof(mode_buf), mode);
+ if (mode_buf[0] == 'r' && mode_buf[1] == 0)
+ uprobe_byname2_res = 7;
+ return 0;
+}
+
+SEC("uretprobe")
+int BPF_URETPROBE(handle_uretprobe_byname2, void *ret)
+{
+ uretprobe_byname2_res = 8;
+ return 0;
+}
+
+static __always_inline bool verify_sleepable_user_copy(void)
+{
+ char data[9];
+
+ bpf_copy_from_user(data, sizeof(data), user_ptr);
+ return bpf_strncmp(data, sizeof(data), "test_data") == 0;
+}
+
+static __always_inline bool verify_sleepable_user_copy_str(void)
+{
+ int ret;
+ char data_long[20];
+ char data_long_pad[20];
+ char data_long_err[20];
+ char data_short[4];
+ char data_short_pad[4];
+
+ ret = bpf_copy_from_user_str(data_short, sizeof(data_short), user_ptr, 0);
+
+ if (bpf_strncmp(data_short, 4, "tes\0") != 0 || ret != 4)
+ return false;
+
+ ret = bpf_copy_from_user_str(data_short_pad, sizeof(data_short_pad), user_ptr, BPF_F_PAD_ZEROS);
+
+	if (bpf_strncmp(data_short_pad, 4, "tes\0") != 0 || ret != 4)
+ return false;
+
+ /* Make sure this passes the verifier */
+ ret = bpf_copy_from_user_str(data_long, dynamic_sz & sizeof(data_long), user_ptr, 0);
+
+ if (ret != 0)
+ return false;
+
+ ret = bpf_copy_from_user_str(data_long, sizeof(data_long), user_ptr, 0);
+
+ if (bpf_strncmp(data_long, 10, "test_data\0") != 0 || ret != 10)
+ return false;
+
+ ret = bpf_copy_from_user_str(data_long_pad, sizeof(data_long_pad), user_ptr, BPF_F_PAD_ZEROS);
+
+ if (bpf_strncmp(data_long_pad, 10, "test_data\0") != 0 || ret != 10 || data_long_pad[19] != '\0')
+ return false;
+
+ ret = bpf_copy_from_user_str(data_long_err, sizeof(data_long_err), (void *)data_long, BPF_F_PAD_ZEROS);
+
+ if (ret > 0 || data_long_err[19] != '\0')
+ return false;
+
+ ret = bpf_copy_from_user_str(data_long, sizeof(data_long), user_ptr, 2);
+
+ if (ret != -EINVAL)
+ return false;
+
+ return true;
+}
+
+SEC("uprobe.s//proc/self/exe:trigger_func3")
+int handle_uprobe_byname3_sleepable(struct pt_regs *ctx)
{
- kprobe_res = 1;
+ if (verify_sleepable_user_copy())
+ uprobe_byname3_sleepable_res = 9;
+ if (verify_sleepable_user_copy_str())
+ uprobe_byname3_str_sleepable_res = 10;
return 0;
}
-SEC("kretprobe/sys_nanosleep")
-int BPF_KRETPROBE(handle_kretprobe)
+/**
+ * Same target as the uprobe.s program above, to force sleepable and
+ * non-sleepable programs into the same bpf_prog_array.
+ */
+SEC("uprobe//proc/self/exe:trigger_func3")
+int handle_uprobe_byname3(struct pt_regs *ctx)
{
- kretprobe_res = 2;
+ uprobe_byname3_res = 11;
return 0;
}
-SEC("uprobe/trigger_func")
-int handle_uprobe(struct pt_regs *ctx)
+SEC("uretprobe.s//proc/self/exe:trigger_func3")
+int handle_uretprobe_byname3_sleepable(struct pt_regs *ctx)
{
- uprobe_res = 3;
+ if (verify_sleepable_user_copy())
+ uretprobe_byname3_sleepable_res = 12;
+ if (verify_sleepable_user_copy_str())
+ uretprobe_byname3_str_sleepable_res = 13;
return 0;
}
-SEC("uretprobe/trigger_func")
-int handle_uretprobe(struct pt_regs *ctx)
+SEC("uretprobe//proc/self/exe:trigger_func3")
+int handle_uretprobe_byname3(struct pt_regs *ctx)
{
- uretprobe_res = 4;
+ uretprobe_byname3_res = 14;
return 0;
}
+
char _license[] SEC("license") = "GPL";
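
Most uprobe programs above auto-attach from the target encoded in their SEC() name; the bare SEC("uprobe")/SEC("uretprobe") ones are attached manually. A hedged sketch of attaching handle_uprobe_byname2 by function name with libbpf's opts API (the library path, the "fopen" target, and the skeleton name are illustrative assumptions):

	#include <bpf/libbpf.h>
	#include "test_attach_probe.skel.h"	/* assumed skeleton name */

	static struct bpf_link *attach_fopen_uprobe(struct test_attach_probe *skel)
	{
		DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts,
				    .func_name = "fopen",
				    .retprobe = false);

		/* libbpf resolves "fopen" inside the given binary/library */
		return bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname2,
						       -1 /* any pid */, "libc.so.6",
						       0 /* func offset */, &opts);
	}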
diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe_manual.c b/tools/testing/selftests/bpf/progs/test_attach_probe_manual.c
new file mode 100644
index 000000000000..7f08bce94596
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_attach_probe_manual.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+int kprobe_res = 0;
+int kretprobe_res = 0;
+int uprobe_res = 0;
+int uretprobe_res = 0;
+int uprobe_byname_res = 0;
+void *user_ptr = 0;
+
+SEC("kprobe")
+int handle_kprobe(struct pt_regs *ctx)
+{
+ kprobe_res = 1;
+ return 0;
+}
+
+SEC("kretprobe")
+int handle_kretprobe(struct pt_regs *ctx)
+{
+ kretprobe_res = 2;
+ return 0;
+}
+
+SEC("uprobe")
+int handle_uprobe(struct pt_regs *ctx)
+{
+ uprobe_res = 3;
+ return 0;
+}
+
+SEC("uretprobe")
+int handle_uretprobe(struct pt_regs *ctx)
+{
+ uretprobe_res = 4;
+ return 0;
+}
+
+SEC("uprobe")
+int handle_uprobe_byname(struct pt_regs *ctx)
+{
+ uprobe_byname_res = 5;
+ return 0;
+}
+
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_autoattach.c b/tools/testing/selftests/bpf/progs/test_autoattach.c
new file mode 100644
index 000000000000..11a44493ebce
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_autoattach.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Google */
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+
+bool prog1_called = false;
+bool prog2_called = false;
+
+SEC("raw_tp/sys_enter")
+int prog1(const void *ctx)
+{
+ prog1_called = true;
+ return 0;
+}
+
+SEC("raw_tp/sys_exit")
+int prog2(const void *ctx)
+{
+ prog2_called = true;
+ return 0;
+}
+
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_cookie.c b/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
new file mode 100644
index 000000000000..c83142b55f47
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <errno.h>
+
+int my_tid;
+
+__u64 kprobe_res;
+__u64 kprobe_multi_res;
+__u64 kretprobe_res;
+__u64 uprobe_res;
+__u64 uretprobe_res;
+__u64 tp_res;
+__u64 pe_res;
+__u64 raw_tp_res;
+__u64 tp_btf_res;
+__u64 fentry_res;
+__u64 fexit_res;
+__u64 fmod_ret_res;
+__u64 lsm_res;
+
+static void update(void *ctx, __u64 *res)
+{
+ if (my_tid != (u32)bpf_get_current_pid_tgid())
+ return;
+
+ *res |= bpf_get_attach_cookie(ctx);
+}
+
+SEC("kprobe")
+int handle_kprobe(struct pt_regs *ctx)
+{
+ update(ctx, &kprobe_res);
+ return 0;
+}
+
+SEC("kretprobe")
+int handle_kretprobe(struct pt_regs *ctx)
+{
+ update(ctx, &kretprobe_res);
+ return 0;
+}
+
+SEC("uprobe")
+int handle_uprobe(struct pt_regs *ctx)
+{
+ update(ctx, &uprobe_res);
+ return 0;
+}
+
+SEC("uretprobe")
+int handle_uretprobe(struct pt_regs *ctx)
+{
+ update(ctx, &uretprobe_res);
+ return 0;
+}
+
+/* bpf_prog_array, used internally by the kernel to keep track of the BPF
+ * programs attached to a given BPF hook (e.g., a tracepoint), doesn't allow
+ * the same BPF program to be attached multiple times. So have three identical
+ * copies ready to attach to the same tracepoint.
+ */
+SEC("tp/syscalls/sys_enter_nanosleep")
+int handle_tp1(struct pt_regs *ctx)
+{
+ update(ctx, &tp_res);
+ return 0;
+}
+SEC("tp/syscalls/sys_enter_nanosleep")
+int handle_tp2(struct pt_regs *ctx)
+{
+ update(ctx, &tp_res);
+ return 0;
+}
+SEC("tp/syscalls/sys_enter_nanosleep")
+int handle_tp3(void *ctx)
+{
+ update(ctx, &tp_res);
+ return 1;
+}
+
+SEC("perf_event")
+int handle_pe(struct pt_regs *ctx)
+{
+ update(ctx, &pe_res);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_raw_tp(void *ctx)
+{
+ update(ctx, &raw_tp_res);
+ return 0;
+}
+
+SEC("tp_btf/sys_enter")
+int handle_tp_btf(void *ctx)
+{
+ update(ctx, &tp_btf_res);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(fentry_test1, int a)
+{
+ update(ctx, &fentry_res);
+ return 0;
+}
+
+SEC("fexit/bpf_fentry_test1")
+int BPF_PROG(fexit_test1, int a, int ret)
+{
+ update(ctx, &fexit_res);
+ return 0;
+}
+
+SEC("fmod_ret/bpf_modify_return_test")
+int BPF_PROG(fmod_ret_test, int _a, int *_b, int _ret)
+{
+ update(ctx, &fmod_ret_res);
+ return 1234;
+}
+
+SEC("lsm/file_mprotect")
+int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
+ unsigned long reqprot, unsigned long prot, int ret)
+{
+ if (my_tid != (u32)bpf_get_current_pid_tgid())
+ return ret;
+ update(ctx, &lsm_res);
+ return -EPERM;
+}
+
+char _license[] SEC("license") = "GPL";
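
Each program above ORs the value returned by bpf_get_attach_cookie() into its result variable, so the test can attach the same program several times with different cookies and check that all of them were observed. A hedged sketch of one such attachment (the kprobe symbol and cookie value are only examples; skeleton name assumed):

	#include <bpf/libbpf.h>
	#include "test_bpf_cookie.skel.h"	/* assumed skeleton name */

	static struct bpf_link *attach_kprobe_with_cookie(struct test_bpf_cookie *skel)
	{
		DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
				    .bpf_cookie = 0x1000);

		/* handle_kprobe reads this value back via bpf_get_attach_cookie() */
		return bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						       "sys_nanosleep", &opts);
	}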
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_ma.c b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
new file mode 100644
index 000000000000..4a4e0b8d9b72
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+struct generic_map_value {
+ void *data;
+};
+
+char _license[] SEC("license") = "GPL";
+
+const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
+const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};
+
+const unsigned int percpu_data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512};
+const volatile unsigned int percpu_data_btf_ids[ARRAY_SIZE(data_sizes)] = {};
+
+int err = 0;
+u32 pid = 0;
+
+#define DEFINE_ARRAY_WITH_KPTR(_size) \
+ struct bin_data_##_size { \
+ char data[_size - sizeof(void *)]; \
+ }; \
+ /* See Commit 5d8d6634ccc, force btf generation for type bin_data_##_size */ \
+ struct bin_data_##_size *__bin_data_##_size; \
+ struct map_value_##_size { \
+ struct bin_data_##_size __kptr * data; \
+ }; \
+ struct { \
+ __uint(type, BPF_MAP_TYPE_ARRAY); \
+ __type(key, int); \
+ __type(value, struct map_value_##_size); \
+ __uint(max_entries, 128); \
+ } array_##_size SEC(".maps")
+
+#define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
+ struct percpu_bin_data_##_size { \
+ char data[_size]; \
+ }; \
+ struct percpu_bin_data_##_size *__percpu_bin_data_##_size; \
+ struct map_value_percpu_##_size { \
+ struct percpu_bin_data_##_size __percpu_kptr * data; \
+ }; \
+ struct { \
+ __uint(type, BPF_MAP_TYPE_ARRAY); \
+ __type(key, int); \
+ __type(value, struct map_value_percpu_##_size); \
+ __uint(max_entries, 128); \
+ } array_percpu_##_size SEC(".maps")
+
+static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)
+{
+ struct generic_map_value *value;
+ unsigned int i, key;
+ void *old, *new;
+
+ for (i = 0; i < batch; i++) {
+ key = i;
+ value = bpf_map_lookup_elem(map, &key);
+ if (!value) {
+ err = 1;
+ return;
+ }
+ new = bpf_obj_new_impl(data_btf_ids[idx], NULL);
+ if (!new) {
+ err = 2;
+ return;
+ }
+ old = bpf_kptr_xchg(&value->data, new);
+ if (old) {
+ bpf_obj_drop(old);
+ err = 3;
+ return;
+ }
+ }
+}
+
+static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)
+{
+ struct generic_map_value *value;
+ unsigned int i, key;
+ void *old;
+
+ for (i = 0; i < batch; i++) {
+ key = i;
+ value = bpf_map_lookup_elem(map, &key);
+ if (!value) {
+ err = 4;
+ return;
+ }
+ old = bpf_kptr_xchg(&value->data, NULL);
+ if (!old) {
+ err = 5;
+ return;
+ }
+ bpf_obj_drop(old);
+ }
+}
+
+static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,
+ unsigned int idx)
+{
+ struct generic_map_value *value;
+ unsigned int i, key;
+ void *old, *new;
+
+ for (i = 0; i < batch; i++) {
+ key = i;
+ value = bpf_map_lookup_elem(map, &key);
+ if (!value) {
+ err = 1;
+ return;
+ }
+ /* per-cpu allocator may not be able to refill in time */
+ new = bpf_percpu_obj_new_impl(percpu_data_btf_ids[idx], NULL);
+ if (!new)
+ continue;
+
+ old = bpf_kptr_xchg(&value->data, new);
+ if (old) {
+ bpf_percpu_obj_drop(old);
+ err = 2;
+ return;
+ }
+ }
+}
+
+static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,
+ unsigned int idx)
+{
+ struct generic_map_value *value;
+ unsigned int i, key;
+ void *old;
+
+ for (i = 0; i < batch; i++) {
+ key = i;
+ value = bpf_map_lookup_elem(map, &key);
+ if (!value) {
+ err = 3;
+ return;
+ }
+ old = bpf_kptr_xchg(&value->data, NULL);
+ if (!old)
+ continue;
+ bpf_percpu_obj_drop(old);
+ }
+}
+
+#define CALL_BATCH_ALLOC(size, batch, idx) \
+ batch_alloc((struct bpf_map *)(&array_##size), batch, idx)
+
+#define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
+ do { \
+ batch_alloc((struct bpf_map *)(&array_##size), batch, idx); \
+ batch_free((struct bpf_map *)(&array_##size), batch, idx); \
+ } while (0)
+
+#define CALL_BATCH_PERCPU_ALLOC(size, batch, idx) \
+ batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx)
+
+#define CALL_BATCH_PERCPU_ALLOC_FREE(size, batch, idx) \
+ do { \
+ batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx); \
+ batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \
+ } while (0)
+
+/* kptr doesn't support bin_data_8 which is a zero-sized array */
+DEFINE_ARRAY_WITH_KPTR(16);
+DEFINE_ARRAY_WITH_KPTR(32);
+DEFINE_ARRAY_WITH_KPTR(64);
+DEFINE_ARRAY_WITH_KPTR(96);
+DEFINE_ARRAY_WITH_KPTR(128);
+DEFINE_ARRAY_WITH_KPTR(192);
+DEFINE_ARRAY_WITH_KPTR(256);
+DEFINE_ARRAY_WITH_KPTR(512);
+DEFINE_ARRAY_WITH_KPTR(1024);
+DEFINE_ARRAY_WITH_KPTR(2048);
+DEFINE_ARRAY_WITH_KPTR(4096);
+
+DEFINE_ARRAY_WITH_PERCPU_KPTR(8);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(16);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(32);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(64);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(96);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(512);
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_batch_alloc_free(void *ctx)
+{
+ if ((u32)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+	/* Alloc 128 16-byte objects in batch to trigger refilling,
+	 * then free 128 16-byte objects in batch to trigger freeing.
+ */
+ CALL_BATCH_ALLOC_FREE(16, 128, 0);
+ CALL_BATCH_ALLOC_FREE(32, 128, 1);
+ CALL_BATCH_ALLOC_FREE(64, 128, 2);
+ CALL_BATCH_ALLOC_FREE(96, 128, 3);
+ CALL_BATCH_ALLOC_FREE(128, 128, 4);
+ CALL_BATCH_ALLOC_FREE(192, 128, 5);
+ CALL_BATCH_ALLOC_FREE(256, 128, 6);
+ CALL_BATCH_ALLOC_FREE(512, 64, 7);
+ CALL_BATCH_ALLOC_FREE(1024, 32, 8);
+ CALL_BATCH_ALLOC_FREE(2048, 16, 9);
+ CALL_BATCH_ALLOC_FREE(4096, 8, 10);
+
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_free_through_map_free(void *ctx)
+{
+ if ((u32)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+	/* Alloc 128 16-byte objects in batch to trigger refilling,
+ * then free these objects through map free.
+ */
+ CALL_BATCH_ALLOC(16, 128, 0);
+ CALL_BATCH_ALLOC(32, 128, 1);
+ CALL_BATCH_ALLOC(64, 128, 2);
+ CALL_BATCH_ALLOC(96, 128, 3);
+ CALL_BATCH_ALLOC(128, 128, 4);
+ CALL_BATCH_ALLOC(192, 128, 5);
+ CALL_BATCH_ALLOC(256, 128, 6);
+ CALL_BATCH_ALLOC(512, 64, 7);
+ CALL_BATCH_ALLOC(1024, 32, 8);
+ CALL_BATCH_ALLOC(2048, 16, 9);
+ CALL_BATCH_ALLOC(4096, 8, 10);
+
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_batch_percpu_alloc_free(void *ctx)
+{
+ if ((u32)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+	/* Alloc 128 8-byte per-cpu objects in batch to trigger refilling,
+	 * then free 128 8-byte per-cpu objects in batch to trigger freeing.
+ */
+ CALL_BATCH_PERCPU_ALLOC_FREE(8, 128, 0);
+ CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
+ CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
+ CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
+ CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
+ CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
+ CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
+ CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
+ CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);
+
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_percpu_free_through_map_free(void *ctx)
+{
+ if ((u32)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+	/* Alloc 128 8-byte per-cpu objects in batch to trigger refilling,
+	 * then free these objects through map free.
+ */
+ CALL_BATCH_PERCPU_ALLOC(8, 128, 0);
+ CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
+ CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
+ CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
+ CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
+ CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
+ CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
+ CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
+ CALL_BATCH_PERCPU_ALLOC(512, 64, 8);
+
+ return 0;
+}
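
data_btf_ids and percpu_data_btf_ids are const volatile, so userspace has to resolve the BTF ids of the bin_data_<size> types from the object's BTF and write them into .rodata between open and load. A hedged sketch of that step (skeleton name assumed; the real prog_tests code may differ):

	#include <stdio.h>
	#include <bpf/btf.h>
	#include <bpf/libbpf.h>
	#include "test_bpf_ma.skel.h"	/* assumed skeleton name */

	static int fill_btf_ids(struct test_bpf_ma *skel)
	{
		const unsigned int sizes[] = {16, 32, 64, 96, 128, 192, 256,
					      512, 1024, 2048, 4096};
		struct btf *btf = bpf_object__btf(skel->obj);
		char name[32];
		int i, id;

		for (i = 0; i < 11; i++) {
			/* the __bin_data_<size> pointers above force BTF
			 * generation for these struct types
			 */
			snprintf(name, sizeof(name), "bin_data_%u", sizes[i]);
			id = btf__find_by_name_kind(btf, name, BTF_KIND_STRUCT);
			if (id < 0)
				return id;
			skel->rodata->data_btf_ids[i] = id;
		}
		return 0;
	}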
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
new file mode 100644
index 000000000000..f7b330ddd007
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define EAFNOSUPPORT 97
+#define EPROTO 71
+#define ENONET 64
+#define EINVAL 22
+#define ENOENT 2
+
+#define NF_CT_ZONE_DIR_ORIG (1 << IP_CT_DIR_ORIGINAL)
+#define NF_CT_ZONE_DIR_REPL (1 << IP_CT_DIR_REPLY)
+
+extern unsigned long CONFIG_HZ __kconfig;
+
+int test_einval_bpf_tuple = 0;
+int test_einval_reserved = 0;
+int test_einval_reserved_new = 0;
+int test_einval_netns_id = 0;
+int test_einval_len_opts = 0;
+int test_eproto_l4proto = 0;
+int test_enonet_netns_id = 0;
+int test_enoent_lookup = 0;
+int test_eafnosupport = 0;
+int test_alloc_entry = -EINVAL;
+int test_insert_entry = -EAFNOSUPPORT;
+int test_succ_lookup = -ENOENT;
+int test_ct_zone_id_alloc_entry = -EINVAL;
+int test_ct_zone_id_insert_entry = -EAFNOSUPPORT;
+int test_ct_zone_id_succ_lookup = -ENOENT;
+int test_ct_zone_dir_enoent_lookup = 0;
+int test_ct_zone_id_enoent_lookup = 0;
+u32 test_delta_timeout = 0;
+u32 test_status = 0;
+u32 test_insert_lookup_mark = 0;
+int test_snat_addr = -EINVAL;
+int test_dnat_addr = -EINVAL;
+__be32 saddr = 0;
+__be16 sport = 0;
+__be32 daddr = 0;
+__be16 dport = 0;
+int test_exist_lookup = -ENOENT;
+u32 test_exist_lookup_mark = 0;
+
+enum nf_nat_manip_type___local {
+ NF_NAT_MANIP_SRC___local,
+ NF_NAT_MANIP_DST___local
+};
+
+struct nf_conn;
+
+struct bpf_ct_opts___local {
+ s32 netns_id;
+ s32 error;
+ u8 l4proto;
+ u8 dir;
+ u8 reserved[2];
+};
+
+struct bpf_ct_opts___new {
+ s32 netns_id;
+ s32 error;
+ u8 l4proto;
+ u8 dir;
+ u16 ct_zone_id;
+ u8 ct_zone_dir;
+ u8 reserved[3];
+} __attribute__((preserve_access_index));
+
+struct nf_conn *bpf_xdp_ct_alloc(struct xdp_md *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_skb_ct_alloc(struct __sk_buff *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_ct_insert_entry(struct nf_conn *) __ksym;
+void bpf_ct_release(struct nf_conn *) __ksym;
+void bpf_ct_set_timeout(struct nf_conn *, u32) __ksym;
+int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym;
+int bpf_ct_set_status(struct nf_conn *, u32) __ksym;
+int bpf_ct_change_status(struct nf_conn *, u32) __ksym;
+int bpf_ct_set_nat_info(struct nf_conn *, union nf_inet_addr *,
+ int port, enum nf_nat_manip_type___local) __ksym;
+
+static __always_inline void
+nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32),
+ struct nf_conn *(*alloc_fn)(void *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32),
+ void *ctx)
+{
+ struct bpf_ct_opts___local opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 };
+ struct bpf_sock_tuple bpf_tuple;
+ struct nf_conn *ct;
+
+ __builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4));
+
+ ct = lookup_fn(ctx, NULL, 0, &opts_def, sizeof(opts_def));
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_einval_bpf_tuple = opts_def.error;
+
+ opts_def.reserved[0] = 1;
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ opts_def.reserved[0] = 0;
+ opts_def.l4proto = IPPROTO_TCP;
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_einval_reserved = opts_def.error;
+
+ opts_def.netns_id = -2;
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ opts_def.netns_id = -1;
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_einval_netns_id = opts_def.error;
+
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def) - 1);
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_einval_len_opts = opts_def.error;
+
+ opts_def.l4proto = IPPROTO_ICMP;
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ opts_def.l4proto = IPPROTO_TCP;
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_eproto_l4proto = opts_def.error;
+
+ opts_def.netns_id = 0xf00f;
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ opts_def.netns_id = -1;
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_enonet_netns_id = opts_def.error;
+
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_enoent_lookup = opts_def.error;
+
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4) - 1, &opts_def,
+ sizeof(opts_def));
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_eafnosupport = opts_def.error;
+
+ bpf_tuple.ipv4.saddr = bpf_get_prandom_u32(); /* src IP */
+ bpf_tuple.ipv4.daddr = bpf_get_prandom_u32(); /* dst IP */
+ bpf_tuple.ipv4.sport = bpf_get_prandom_u32(); /* src port */
+ bpf_tuple.ipv4.dport = bpf_get_prandom_u32(); /* dst port */
+
+ ct = alloc_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ if (ct) {
+ __u16 sport = bpf_get_prandom_u32();
+ __u16 dport = bpf_get_prandom_u32();
+ union nf_inet_addr saddr = {};
+ union nf_inet_addr daddr = {};
+ struct nf_conn *ct_ins;
+
+ bpf_ct_set_timeout(ct, 10000);
+ ct->mark = 77;
+
+ /* snat */
+ saddr.ip = bpf_get_prandom_u32();
+ bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC___local);
+ /* dnat */
+ daddr.ip = bpf_get_prandom_u32();
+ bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST___local);
+
+ ct_ins = bpf_ct_insert_entry(ct);
+ if (ct_ins) {
+ struct nf_conn *ct_lk;
+
+ ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+ &opts_def, sizeof(opts_def));
+ if (ct_lk) {
+ struct nf_conntrack_tuple *tuple;
+
+ /* check snat and dnat addresses */
+ tuple = &ct_lk->tuplehash[IP_CT_DIR_REPLY].tuple;
+ if (tuple->dst.u3.ip == saddr.ip &&
+ tuple->dst.u.all == bpf_htons(sport))
+ test_snat_addr = 0;
+ if (tuple->src.u3.ip == daddr.ip &&
+ tuple->src.u.all == bpf_htons(dport))
+ test_dnat_addr = 0;
+
+ /* update ct entry timeout */
+ bpf_ct_change_timeout(ct_lk, 10000);
+ test_delta_timeout = ct_lk->timeout - bpf_jiffies64();
+ test_delta_timeout /= CONFIG_HZ;
+ test_insert_lookup_mark = ct_lk->mark;
+ bpf_ct_change_status(ct_lk,
+ IPS_CONFIRMED | IPS_SEEN_REPLY);
+ test_status = ct_lk->status;
+
+ bpf_ct_release(ct_lk);
+ test_succ_lookup = 0;
+ }
+ bpf_ct_release(ct_ins);
+ test_insert_entry = 0;
+ }
+ test_alloc_entry = 0;
+ }
+
+ bpf_tuple.ipv4.saddr = saddr;
+ bpf_tuple.ipv4.daddr = daddr;
+ bpf_tuple.ipv4.sport = sport;
+ bpf_tuple.ipv4.dport = dport;
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ if (ct) {
+ test_exist_lookup = 0;
+ if (ct->mark == 42) {
+ ct->mark++;
+ test_exist_lookup_mark = ct->mark;
+ }
+ bpf_ct_release(ct);
+ } else {
+ test_exist_lookup = opts_def.error;
+ }
+}
+
+static __always_inline void
+nf_ct_opts_new_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___new *, u32),
+ struct nf_conn *(*alloc_fn)(void *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___new *, u32),
+ void *ctx)
+{
+ struct bpf_ct_opts___new opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 };
+ struct bpf_sock_tuple bpf_tuple;
+ struct nf_conn *ct;
+
+ __builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4));
+
+ opts_def.reserved[0] = 1;
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ opts_def.reserved[0] = 0;
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_einval_reserved_new = opts_def.error;
+
+ bpf_tuple.ipv4.saddr = bpf_get_prandom_u32(); /* src IP */
+ bpf_tuple.ipv4.daddr = bpf_get_prandom_u32(); /* dst IP */
+ bpf_tuple.ipv4.sport = bpf_get_prandom_u32(); /* src port */
+ bpf_tuple.ipv4.dport = bpf_get_prandom_u32(); /* dst port */
+
+ /* use non-default ct zone */
+ opts_def.ct_zone_id = 10;
+ opts_def.ct_zone_dir = NF_CT_ZONE_DIR_ORIG;
+ ct = alloc_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ if (ct) {
+ __u16 sport = bpf_get_prandom_u32();
+ __u16 dport = bpf_get_prandom_u32();
+ union nf_inet_addr saddr = {};
+ union nf_inet_addr daddr = {};
+ struct nf_conn *ct_ins;
+
+ bpf_ct_set_timeout(ct, 10000);
+
+ /* snat */
+ saddr.ip = bpf_get_prandom_u32();
+ bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC___local);
+ /* dnat */
+ daddr.ip = bpf_get_prandom_u32();
+ bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST___local);
+
+ ct_ins = bpf_ct_insert_entry(ct);
+ if (ct_ins) {
+ struct nf_conn *ct_lk;
+
+ /* entry should exist in same ct zone we inserted it */
+ ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+ &opts_def, sizeof(opts_def));
+ if (ct_lk) {
+ bpf_ct_release(ct_lk);
+ test_ct_zone_id_succ_lookup = 0;
+ }
+
+ /* entry should not exist with wrong direction */
+ opts_def.ct_zone_dir = NF_CT_ZONE_DIR_REPL;
+ ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+ &opts_def, sizeof(opts_def));
+ opts_def.ct_zone_dir = NF_CT_ZONE_DIR_ORIG;
+ if (ct_lk)
+ bpf_ct_release(ct_lk);
+ else
+ test_ct_zone_dir_enoent_lookup = opts_def.error;
+
+ /* entry should not exist in default ct zone */
+ opts_def.ct_zone_id = 0;
+ ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+ &opts_def, sizeof(opts_def));
+ if (ct_lk)
+ bpf_ct_release(ct_lk);
+ else
+ test_ct_zone_id_enoent_lookup = opts_def.error;
+
+ bpf_ct_release(ct_ins);
+ test_ct_zone_id_insert_entry = 0;
+ }
+ test_ct_zone_id_alloc_entry = 0;
+ }
+}
+
+SEC("xdp")
+int nf_xdp_ct_test(struct xdp_md *ctx)
+{
+ nf_ct_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx);
+ nf_ct_opts_new_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx);
+ return 0;
+}
+
+SEC("tc")
+int nf_skb_ct_test(struct __sk_buff *ctx)
+{
+ nf_ct_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx);
+ nf_ct_opts_new_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
new file mode 100644
index 000000000000..a586f087ffeb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+struct nf_conn;
+
+struct bpf_ct_opts___local {
+ s32 netns_id;
+ s32 error;
+ u8 l4proto;
+ u8 reserved[3];
+} __attribute__((preserve_access_index));
+
+struct nf_conn *bpf_skb_ct_alloc(struct __sk_buff *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_ct_insert_entry(struct nf_conn *) __ksym;
+void bpf_ct_release(struct nf_conn *) __ksym;
+void bpf_ct_set_timeout(struct nf_conn *, u32) __ksym;
+int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym;
+int bpf_ct_set_status(struct nf_conn *, u32) __ksym;
+int bpf_ct_change_status(struct nf_conn *, u32) __ksym;
+
+SEC("?tc")
+int alloc_release(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ bpf_ct_release(ct);
+ return 0;
+}
+
+SEC("?tc")
+int insert_insert(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ ct = bpf_ct_insert_entry(ct);
+ if (!ct)
+ return 0;
+ ct = bpf_ct_insert_entry(ct);
+ return 0;
+}
+
+SEC("?tc")
+int lookup_insert(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ bpf_ct_insert_entry(ct);
+ return 0;
+}
+
+SEC("?tc")
+int write_not_allowlisted_field(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ ct->status = 0xF00;
+ return 0;
+}
+
+SEC("?tc")
+int set_timeout_after_insert(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ ct = bpf_ct_insert_entry(ct);
+ if (!ct)
+ return 0;
+ bpf_ct_set_timeout(ct, 0);
+ return 0;
+}
+
+SEC("?tc")
+int set_status_after_insert(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ ct = bpf_ct_insert_entry(ct);
+ if (!ct)
+ return 0;
+ bpf_ct_set_status(ct, 0);
+ return 0;
+}
+
+SEC("?tc")
+int change_timeout_after_alloc(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ bpf_ct_change_timeout(ct, 0);
+ return 0;
+}
+
+SEC("?tc")
+int change_status_after_alloc(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ bpf_ct_change_status(ct, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_btf_decl_tag.c b/tools/testing/selftests/bpf/progs/test_btf_decl_tag.c
new file mode 100644
index 000000000000..c88ccc53529a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_btf_decl_tag.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#if __has_attribute(btf_decl_tag)
+#define __tag1 __attribute__((btf_decl_tag("tag1")))
+#define __tag2 __attribute__((btf_decl_tag("tag2")))
+volatile const bool skip_tests __tag1 __tag2 = false;
+#else
+#define __tag1
+#define __tag2
+volatile const bool skip_tests = true;
+#endif
+
+struct key_t {
+ int a;
+ int b __tag1 __tag2;
+ int c;
+} __tag1 __tag2;
+
+typedef struct {
+ int a;
+ int b;
+} value_t __tag1 __tag2;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 3);
+ __type(key, struct key_t);
+ __type(value, value_t);
+} hashmap1 SEC(".maps");
+
+
+static __noinline int foo(int x __tag1 __tag2) __tag1 __tag2
+{
+ struct key_t key;
+ value_t val = {};
+
+ key.a = key.b = key.c = x;
+ bpf_map_update_elem(&hashmap1, &key, &val, 0);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(sub, int x)
+{
+ return foo(x);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_btf_ext.c b/tools/testing/selftests/bpf/progs/test_btf_ext.c
new file mode 100644
index 000000000000..cdf20331db04
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_btf_ext.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Meta Platforms Inc. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__noinline static void f0(void)
+{
+ __u64 a = 1;
+
+ __sink(a);
+}
+
+SEC("xdp")
+__u64 global_func(struct xdp_md *xdp)
+{
+ f0();
+ return XDP_DROP;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_btf_haskv.c b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
deleted file mode 100644
index 31538c9ed193..000000000000
--- a/tools/testing/selftests/bpf/progs/test_btf_haskv.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018 Facebook */
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-#include "bpf_legacy.h"
-
-int _version SEC("version") = 1;
-
-struct ipv_counts {
- unsigned int v4;
- unsigned int v6;
-};
-
-struct bpf_map_def SEC("maps") btf_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(struct ipv_counts),
- .max_entries = 4,
-};
-
-BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts);
-
-__attribute__((noinline))
-int test_long_fname_2(void)
-{
- struct ipv_counts *counts;
- int key = 0;
-
- counts = bpf_map_lookup_elem(&btf_map, &key);
- if (!counts)
- return 0;
-
- counts->v6++;
-
- return 0;
-}
-
-__attribute__((noinline))
-int test_long_fname_1(void)
-{
- return test_long_fname_2();
-}
-
-SEC("dummy_tracepoint")
-int _dummy_tracepoint(void *arg)
-{
- return test_long_fname_1();
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c b/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c
index c1e0c8c7c55f..c218cf8989a9 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c
@@ -21,8 +21,8 @@ struct inner_map_sz2 {
struct outer_arr {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 3);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
/* it's possible to use anonymous struct as inner map definition here */
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
@@ -61,8 +61,8 @@ struct inner_map_sz4 {
struct outer_arr_dyn {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 3);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(map_flags, BPF_F_INNER_MAP);
@@ -81,7 +81,7 @@ struct outer_arr_dyn {
struct outer_hash {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, 5);
- __uint(key_size, sizeof(int));
+ __type(key, int);
/* Here everything works flawlessly due to reuse of struct inner_map
* and compiler will complain at the attempt to use non-inner_map
* references below. This is great experience.
@@ -111,8 +111,8 @@ struct sockarr_sz2 {
struct outer_sockarr_sz1 {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
__array(values, struct sockarr_sz1);
} outer_sockarr SEC(".maps") = {
.values = { (void *)&sockarr_sz1 },
diff --git a/tools/testing/selftests/bpf/progs/test_btf_newkv.c b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
index 6c5560162746..251854a041b5 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_newkv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
@@ -4,23 +4,11 @@
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"
-int _version SEC("version") = 1;
-
struct ipv_counts {
unsigned int v4;
unsigned int v6;
};
-/* just to validate we can handle maps in multiple sections */
-struct bpf_map_def SEC("maps") btf_map_legacy = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(long long),
- .max_entries = 4,
-};
-
-BPF_ANNOTATE_KV_PAIR(btf_map_legacy, int, struct ipv_counts);
-
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 4);
@@ -40,11 +28,6 @@ int test_long_fname_2(void)
counts->v6++;
- /* just verify we can reference both maps */
- counts = bpf_map_lookup_elem(&btf_map_legacy, &key);
- if (!counts)
- return 0;
-
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_btf_nokv.c b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
index 506da7fd2da2..1dabb88f8cb4 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_nokv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
@@ -3,19 +3,17 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-int _version SEC("version") = 1;
-
struct ipv_counts {
unsigned int v4;
unsigned int v6;
};
-struct bpf_map_def SEC("maps") btf_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(struct ipv_counts),
- .max_entries = 4,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(struct ipv_counts));
+ __uint(max_entries, 4);
+} btf_map SEC(".maps");
__attribute__((noinline))
int test_long_fname_2(void)
diff --git a/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c b/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c
index 9a6b85dd52d2..1cd1a1b72cb5 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c
@@ -1,31 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include <string.h>
-#include <errno.h>
-#include <netinet/in.h>
-#include <linux/stddef.h>
-#include <linux/bpf.h>
-#include <linux/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/if_ether.h>
-#include <linux/pkt_cls.h>
-
+#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-#include "bpf_tcp_helpers.h"
+
+#ifndef ENOENT
+#define ENOENT 2
+#endif
struct sockaddr_in6 srv_sa6 = {};
+struct sockaddr_in srv_sa4 = {};
__u16 listen_tp_sport = 0;
__u16 req_sk_sport = 0;
__u32 recv_cookie = 0;
__u32 gen_cookie = 0;
+__u32 mss = 0;
__u32 linum = 0;
#define LOG() ({ if (!linum) linum = __LINE__; })
-static void test_syncookie_helper(struct ipv6hdr *ip6h, struct tcphdr *th,
- struct tcp_sock *tp,
+static void test_syncookie_helper(void *iphdr, int iphdr_size,
+ struct tcphdr *th, struct tcp_sock *tp,
struct __sk_buff *skb)
{
if (th->syn) {
@@ -44,17 +40,18 @@ static void test_syncookie_helper(struct ipv6hdr *ip6h, struct tcphdr *th,
return;
}
- mss_cookie = bpf_tcp_gen_syncookie(tp, ip6h, sizeof(*ip6h),
+ mss_cookie = bpf_tcp_gen_syncookie(tp, iphdr, iphdr_size,
th, 40);
if (mss_cookie < 0) {
if (mss_cookie != -ENOENT)
LOG();
} else {
gen_cookie = (__u32)mss_cookie;
+ mss = mss_cookie >> 32;
}
} else if (gen_cookie) {
/* It was in cookie mode */
- int ret = bpf_tcp_check_syncookie(tp, ip6h, sizeof(*ip6h),
+ int ret = bpf_tcp_check_syncookie(tp, iphdr, iphdr_size,
th, sizeof(*th));
if (ret < 0) {
@@ -66,26 +63,58 @@ static void test_syncookie_helper(struct ipv6hdr *ip6h, struct tcphdr *th,
}
}
-static int handle_ip6_tcp(struct ipv6hdr *ip6h, struct __sk_buff *skb)
+static int handle_ip_tcp(struct ethhdr *eth, struct __sk_buff *skb)
{
- struct bpf_sock_tuple *tuple;
+ struct bpf_sock_tuple *tuple = NULL;
+ unsigned int tuple_len = 0;
struct bpf_sock *bpf_skc;
- unsigned int tuple_len;
+ void *data_end, *iphdr;
+ struct ipv6hdr *ip6h;
+ struct iphdr *ip4h;
struct tcphdr *th;
- void *data_end;
+ int iphdr_size;
data_end = (void *)(long)(skb->data_end);
- th = (struct tcphdr *)(ip6h + 1);
- if (th + 1 > data_end)
- return TC_ACT_OK;
-
- /* Is it the testing traffic? */
- if (th->dest != srv_sa6.sin6_port)
+ switch (eth->h_proto) {
+ case bpf_htons(ETH_P_IP):
+ ip4h = (struct iphdr *)(eth + 1);
+ if (ip4h + 1 > data_end)
+ return TC_ACT_OK;
+ if (ip4h->protocol != IPPROTO_TCP)
+ return TC_ACT_OK;
+ th = (struct tcphdr *)(ip4h + 1);
+ if (th + 1 > data_end)
+ return TC_ACT_OK;
+ /* Is it the testing traffic? */
+ if (th->dest != srv_sa4.sin_port)
+ return TC_ACT_OK;
+ tuple_len = sizeof(tuple->ipv4);
+ tuple = (struct bpf_sock_tuple *)&ip4h->saddr;
+ iphdr = ip4h;
+ iphdr_size = sizeof(*ip4h);
+ break;
+ case bpf_htons(ETH_P_IPV6):
+ ip6h = (struct ipv6hdr *)(eth + 1);
+ if (ip6h + 1 > data_end)
+ return TC_ACT_OK;
+ if (ip6h->nexthdr != IPPROTO_TCP)
+ return TC_ACT_OK;
+ th = (struct tcphdr *)(ip6h + 1);
+ if (th + 1 > data_end)
+ return TC_ACT_OK;
+ /* Is it the testing traffic? */
+ if (th->dest != srv_sa6.sin6_port)
+ return TC_ACT_OK;
+ tuple_len = sizeof(tuple->ipv6);
+ tuple = (struct bpf_sock_tuple *)&ip6h->saddr;
+ iphdr = ip6h;
+ iphdr_size = sizeof(*ip6h);
+ break;
+ default:
return TC_ACT_OK;
+ }
- tuple_len = sizeof(tuple->ipv6);
- tuple = (struct bpf_sock_tuple *)&ip6h->saddr;
if ((void *)tuple + tuple_len > data_end) {
LOG();
return TC_ACT_OK;
@@ -132,7 +161,7 @@ static int handle_ip6_tcp(struct ipv6hdr *ip6h, struct __sk_buff *skb)
listen_tp_sport = tp->inet_conn.icsk_inet.sk.__sk_common.skc_num;
- test_syncookie_helper(ip6h, th, tp, skb);
+ test_syncookie_helper(iphdr, iphdr_size, th, tp, skb);
bpf_sk_release(tp);
return TC_ACT_OK;
}
@@ -145,10 +174,9 @@ release:
return TC_ACT_OK;
}
-SEC("classifier/ingress")
+SEC("tc")
int cls_ingress(struct __sk_buff *skb)
{
- struct ipv6hdr *ip6h;
struct ethhdr *eth;
void *data_end;
@@ -158,17 +186,11 @@ int cls_ingress(struct __sk_buff *skb)
if (eth + 1 > data_end)
return TC_ACT_OK;
- if (eth->h_proto != bpf_htons(ETH_P_IPV6))
- return TC_ACT_OK;
-
- ip6h = (struct ipv6hdr *)(eth + 1);
- if (ip6h + 1 > data_end)
+ if (eth->h_proto != bpf_htons(ETH_P_IP) &&
+ eth->h_proto != bpf_htons(ETH_P_IPV6))
return TC_ACT_OK;
- if (ip6h->nexthdr == IPPROTO_TCP)
- return handle_ip6_tcp(ip6h, skb);
-
- return TC_ACT_OK;
+ return handle_ip_tcp(eth, skb);
}
char _license[] SEC("license") = "GPL";
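In the hunks above, the syncookie path now records the MSS alongside the cookie: bpf_tcp_gen_syncookie() packs both into its 64-bit return value, with the cookie in the low 32 bits and the MSS in the upper bits, which is what the gen_cookie/mss assignments extract. A minimal sketch of that split, mirroring the assignments in the program above (illustration only, not part of the patch):

static __always_inline int split_syncookie(__s64 ret, __u32 *cookie, __u32 *mss)
{
	if (ret < 0)
		return -1;		/* e.g. -ENOENT when syncookies are not in use */
	*cookie = (__u32)ret;		/* low 32 bits: generated SYN cookie */
	*mss = ret >> 32;		/* remaining bits: MSS encoded in the cookie */
	return 0;
}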
diff --git a/tools/testing/selftests/bpf/progs/test_build_id.c b/tools/testing/selftests/bpf/progs/test_build_id.c
new file mode 100644
index 000000000000..32ce59f9aa27
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_build_id.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+struct bpf_stack_build_id stack_sleepable[128];
+int res_sleepable;
+
+struct bpf_stack_build_id stack_nofault[128];
+int res_nofault;
+
+SEC("uprobe.multi/./uprobe_multi:uprobe")
+int uprobe_nofault(struct pt_regs *ctx)
+{
+ res_nofault = bpf_get_stack(ctx, stack_nofault, sizeof(stack_nofault),
+ BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
+
+ return 0;
+}
+
+SEC("uprobe.multi.s/./uprobe_multi:uprobe")
+int uprobe_sleepable(struct pt_regs *ctx)
+{
+ res_sleepable = bpf_get_stack(ctx, stack_sleepable, sizeof(stack_sleepable),
+ BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
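bpf_get_stack() with BPF_F_USER_BUILD_ID returns the number of bytes written, so the number of captured frames is that value divided by sizeof(struct bpf_stack_build_id), and each entry's status field reports whether a build ID could be resolved. A hedged post-processing sketch (not part of the test):

static __always_inline int count_valid_frames(struct bpf_stack_build_id *stack,
					      int res, int max_frames)
{
	int i, frames, valid = 0;

	if (res <= 0)
		return 0;
	frames = res / sizeof(*stack);
	for (i = 0; i < frames && i < max_frames; i++) {
		if (stack[i].status == BPF_STACK_BUILD_ID_VALID)
			valid++;
	}
	return valid;
}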
diff --git a/tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c b/tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c
new file mode 100644
index 000000000000..4fee0fdc7607
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+__u32 target_ancestor_level;
+__u64 target_ancestor_cgid;
+int target_pid, target_hid;
+
+struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
+struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+
+static int bpf_link_create_verify(int cmd)
+{
+ struct cgroup *cgrp, *ancestor;
+ struct task_struct *task;
+ int ret = 0;
+
+ if (cmd != BPF_LINK_CREATE)
+ return 0;
+
+ task = bpf_get_current_task_btf();
+
+ /* Then it can run in parallel with others */
+ if (task->pid != target_pid)
+ return 0;
+
+ cgrp = bpf_task_get_cgroup1(task, target_hid);
+ if (!cgrp)
+ return 0;
+
+ /* Refuse it if its cgid or its ancestor's cgid is the target cgid */
+ if (cgrp->kn->id == target_ancestor_cgid)
+ ret = -1;
+
+ ancestor = bpf_cgroup_ancestor(cgrp, target_ancestor_level);
+ if (!ancestor)
+ goto out;
+
+ if (ancestor->kn->id == target_ancestor_cgid)
+ ret = -1;
+ bpf_cgroup_release(ancestor);
+
+out:
+ bpf_cgroup_release(cgrp);
+ return ret;
+}
+
+SEC("lsm/bpf")
+int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ return bpf_link_create_verify(cmd);
+}
+
+SEC("lsm.s/bpf")
+int BPF_PROG(lsm_s_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ return bpf_link_create_verify(cmd);
+}
+
+SEC("fentry")
+int BPF_PROG(fentry_run)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_cgroup_link.c b/tools/testing/selftests/bpf/progs/test_cgroup_link.c
index 77e47b9e4446..4faba88e45a5 100644
--- a/tools/testing/selftests/bpf/progs/test_cgroup_link.c
+++ b/tools/testing/selftests/bpf/progs/test_cgroup_link.c
@@ -6,14 +6,14 @@
int calls = 0;
int alt_calls = 0;
-SEC("cgroup_skb/egress1")
+SEC("cgroup_skb/egress")
int egress(struct __sk_buff *skb)
{
__sync_fetch_and_add(&calls, 1);
return 1;
}
-SEC("cgroup_skb/egress2")
+SEC("cgroup_skb/egress")
int egress_alt(struct __sk_buff *skb)
{
__sync_fetch_and_add(&alt_calls, 1);
diff --git a/tools/testing/selftests/bpf/progs/test_check_mtu.c b/tools/testing/selftests/bpf/progs/test_check_mtu.c
new file mode 100644
index 000000000000..2ec1de11a3ae
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_check_mtu.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Jesper Dangaard Brouer */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/if_ether.h>
+
+#include <stddef.h>
+#include <stdint.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* Userspace will update this with the MTU it can see on the device */
+volatile const int GLOBAL_USER_MTU;
+volatile const __u32 GLOBAL_USER_IFINDEX;
+
+/* BPF-prog will update these with MTU values it can see */
+__u32 global_bpf_mtu_xdp = 0;
+__u32 global_bpf_mtu_tc = 0;
+
+SEC("xdp")
+int xdp_use_helper_basic(struct xdp_md *ctx)
+{
+ __u32 mtu_len = 0;
+
+ if (bpf_check_mtu(ctx, 0, &mtu_len, 0, 0))
+ return XDP_ABORTED;
+
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int xdp_use_helper(struct xdp_md *ctx)
+{
+ int retval = XDP_PASS; /* Expected retval on successful test */
+ __u32 mtu_len = 0;
+ __u32 ifindex = 0;
+ int delta = 0;
+
+ /* When ifindex is zero, save net_device lookup and use ctx netdev */
+ if (GLOBAL_USER_IFINDEX > 0)
+ ifindex = GLOBAL_USER_IFINDEX;
+
+ if (bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0)) {
+ /* mtu_len is also valid when the check fails */
+ retval = XDP_ABORTED;
+ goto out;
+ }
+
+ if (mtu_len != GLOBAL_USER_MTU)
+ retval = XDP_DROP;
+
+out:
+ global_bpf_mtu_xdp = mtu_len;
+ return retval;
+}
+
+SEC("xdp")
+int xdp_exceed_mtu(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ __u32 data_len = data_end - data;
+ int retval = XDP_ABORTED; /* Fail */
+ __u32 mtu_len = 0;
+ int delta;
+ int err;
+
+ /* Exceed MTU with 1 via delta adjust */
+ delta = GLOBAL_USER_MTU - (data_len - ETH_HLEN) + 1;
+
+ err = bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0);
+ if (err) {
+ retval = XDP_PASS; /* Success in exceeding MTU check */
+ if (err != BPF_MTU_CHK_RET_FRAG_NEEDED)
+ retval = XDP_DROP;
+ }
+
+ global_bpf_mtu_xdp = mtu_len;
+ return retval;
+}
+
+SEC("xdp")
+int xdp_minus_delta(struct xdp_md *ctx)
+{
+ int retval = XDP_PASS; /* Expected retval on successful test */
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ __u32 data_len = data_end - data;
+ __u32 mtu_len = 0;
+ int delta;
+
+ /* Borderline test case: a minus delta exceeding the packet length is allowed */
+ delta = -((data_len - ETH_HLEN) + 1);
+
+ /* A negative length (adjusted via delta) still passes the MTU check; other
+ * helpers are responsible for catching this when doing the actual size adjust
+ */
+ if (bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0))
+ retval = XDP_ABORTED;
+
+ global_bpf_mtu_xdp = mtu_len;
+ return retval;
+}
+
+SEC("xdp")
+int xdp_input_len(struct xdp_md *ctx)
+{
+ int retval = XDP_PASS; /* Expected retval on successful test */
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ __u32 data_len = data_end - data;
+
+ /* The API allows the user to pass a length to check as input via the mtu_len param;
+ * the resulting MTU value is still output in the mtu_len param after the call.
+ *
+ * Input len is L3, like MTU and iph->tot_len.
+ * Remember XDP data_len is L2.
+ */
+ __u32 mtu_len = data_len - ETH_HLEN;
+
+ if (bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0))
+ retval = XDP_ABORTED;
+
+ global_bpf_mtu_xdp = mtu_len;
+ return retval;
+}
+
+SEC("xdp")
+int xdp_input_len_exceed(struct xdp_md *ctx)
+{
+ int retval = XDP_ABORTED; /* Fail */
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ int err;
+
+ /* The API allows the user to pass a length to check as input via the mtu_len param;
+ * the resulting MTU value is still output in the mtu_len param after the call.
+ *
+ * Input length value is L3 size like MTU.
+ */
+ __u32 mtu_len = GLOBAL_USER_MTU;
+
+ mtu_len += 1; /* Exceed with 1 */
+
+ err = bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0);
+ if (err == BPF_MTU_CHK_RET_FRAG_NEEDED)
+ retval = XDP_PASS; /* Success in exceeding MTU check */
+
+ global_bpf_mtu_xdp = mtu_len;
+ return retval;
+}
+
+SEC("tc")
+int tc_use_helper(struct __sk_buff *ctx)
+{
+ int retval = BPF_OK; /* Expected retval on successful test */
+ __u32 mtu_len = 0;
+ int delta = 0;
+
+ if (bpf_check_mtu(ctx, 0, &mtu_len, delta, 0)) {
+ retval = BPF_DROP;
+ goto out;
+ }
+
+ if (mtu_len != GLOBAL_USER_MTU)
+ retval = BPF_REDIRECT;
+out:
+ global_bpf_mtu_tc = mtu_len;
+ return retval;
+}
+
+SEC("tc")
+int tc_exceed_mtu(struct __sk_buff *ctx)
+{
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ int retval = BPF_DROP; /* Fail */
+ __u32 skb_len = ctx->len;
+ __u32 mtu_len = 0;
+ int delta;
+ int err;
+
+ /* Exceed MTU with 1 via delta adjust */
+ delta = GLOBAL_USER_MTU - (skb_len - ETH_HLEN) + 1;
+
+ err = bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0);
+ if (err) {
+ retval = BPF_OK; /* Success in exceeding MTU check */
+ if (err != BPF_MTU_CHK_RET_FRAG_NEEDED)
+ retval = BPF_DROP;
+ }
+
+ global_bpf_mtu_tc = mtu_len;
+ return retval;
+}
+
+SEC("tc")
+int tc_exceed_mtu_da(struct __sk_buff *ctx)
+{
+ /* SKB Direct-Access variant */
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ __u32 data_len = data_end - data;
+ int retval = BPF_DROP; /* Fail */
+ __u32 mtu_len = 0;
+ int delta;
+ int err;
+
+ /* Exceed MTU with 1 via delta adjust */
+ delta = GLOBAL_USER_MTU - (data_len - ETH_HLEN) + 1;
+
+ err = bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0);
+ if (err) {
+ retval = BPF_OK; /* Success in exceeding MTU check */
+ if (err != BPF_MTU_CHK_RET_FRAG_NEEDED)
+ retval = BPF_DROP;
+ }
+
+ global_bpf_mtu_tc = mtu_len;
+ return retval;
+}
+
+SEC("tc")
+int tc_minus_delta(struct __sk_buff *ctx)
+{
+ int retval = BPF_OK; /* Expected retval on successful test */
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ __u32 skb_len = ctx->len;
+ __u32 mtu_len = 0;
+ int delta;
+
+ /* Borderline test case: a minus delta exceeding the packet length is allowed */
+ delta = -((skb_len - ETH_HLEN) + 1);
+
+ /* A negative length (adjusted via delta) still passes the MTU check; other
+ * helpers are responsible for catching this when doing the actual size adjust
+ */
+ if (bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0))
+ retval = BPF_DROP;
+
+ global_bpf_mtu_xdp = mtu_len;
+ return retval;
+}
+
+SEC("tc")
+int tc_input_len(struct __sk_buff *ctx)
+{
+ int retval = BPF_OK; /* Expected retval on successful test */
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+
+ /* The API allows the user to pass a length to check as input via the mtu_len param;
+ * the resulting MTU value is still output in the mtu_len param after the call.
+ *
+ * Input length value is L3 size.
+ */
+ __u32 mtu_len = GLOBAL_USER_MTU;
+
+ if (bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0))
+ retval = BPF_DROP;
+
+ global_bpf_mtu_xdp = mtu_len;
+ return retval;
+}
+
+SEC("tc")
+int tc_input_len_exceed(struct __sk_buff *ctx)
+{
+ int retval = BPF_DROP; /* Fail */
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ int err;
+
+ /* The API allows the user to pass a length to check as input via the mtu_len param;
+ * the resulting MTU value is still output in the mtu_len param after the call.
+ *
+ * Input length value is L3 size like MTU.
+ */
+ __u32 mtu_len = GLOBAL_USER_MTU;
+
+ mtu_len += 1; /* Exceed with 1 */
+
+ err = bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0);
+ if (err == BPF_MTU_CHK_RET_FRAG_NEEDED)
+ retval = BPF_OK; /* Success in exceeding MTU check */
+
+ global_bpf_mtu_xdp = mtu_len;
+ return retval;
+}
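The delta passed to bpf_check_mtu() throughout this file follows one convention: the MTU is an L3 quantity, while XDP data_len and skb->len include the Ethernet header, hence the ETH_HLEN adjustment. A worked example of the exceed case (illustration only, not part of the test):

static __always_inline int exceed_delta(__u32 mtu, __u32 frame_len)
{
	/* With mtu = 1500 and a full-size frame_len = 1514, this yields
	 * delta = 1, i.e. one byte past the MTU, so bpf_check_mtu()
	 * returns BPF_MTU_CHK_RET_FRAG_NEEDED.
	 */
	return mtu - (frame_len - ETH_HLEN) + 1;
}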
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
index c9f8464996ea..f344c6835e84 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
@@ -15,13 +15,16 @@
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
-#include <linux/udp.h>
+#include <netinet/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
#include "test_cls_redirect.h"
+#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
+
#ifdef SUBPROGS
#define INLINING __noinline
#else
@@ -39,8 +42,8 @@ char _license[] SEC("license") = "Dual BSD/GPL";
/**
* Destination port and IP used for UDP encapsulation.
*/
-static volatile const __be16 ENCAPSULATION_PORT;
-static volatile const __be32 ENCAPSULATION_IP;
+volatile const __be16 ENCAPSULATION_PORT;
+volatile const __be32 ENCAPSULATION_IP;
typedef struct {
uint64_t processed_packets_total;
@@ -70,6 +73,7 @@ typedef struct {
uint64_t errors_total_encap_adjust_failed;
uint64_t errors_total_encap_buffer_too_small;
uint64_t errors_total_redirect_loop;
+ uint64_t errors_total_encap_mtu_violate;
} metrics_t;
typedef enum {
@@ -266,7 +270,7 @@ static INLINING void pkt_ipv4_checksum(struct iphdr *iph)
uint32_t acc = 0;
uint16_t *ipw = (uint16_t *)iph;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (size_t i = 0; i < sizeof(struct iphdr) / 2; i++) {
acc += ipw[i];
}
@@ -293,7 +297,7 @@ bool pkt_skip_ipv6_extension_headers(buf_t *pkt,
};
*is_fragment = false;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < 6; i++) {
switch (exthdr.next) {
case IPPROTO_FRAGMENT:
@@ -407,6 +411,7 @@ static INLINING ret_t forward_with_gre(struct __sk_buff *skb, encap_headers_t *e
payload_off - sizeof(struct ethhdr) - sizeof(struct iphdr);
int32_t delta = sizeof(struct gre_base_hdr) - encap_overhead;
uint16_t proto = ETH_P_IP;
+ uint32_t mtu_len = 0;
/* Loop protection: the inner packet's TTL is decremented as a safeguard
* against any forwarding loop. As the only interesting field is the TTL
@@ -479,6 +484,11 @@ static INLINING ret_t forward_with_gre(struct __sk_buff *skb, encap_headers_t *e
}
}
+ if (bpf_check_mtu(skb, skb->ifindex, &mtu_len, delta, 0)) {
+ metrics->errors_total_encap_mtu_violate++;
+ return TC_ACT_SHOT;
+ }
+
if (bpf_skb_adjust_room(skb, delta, BPF_ADJ_ROOM_NET,
BPF_F_ADJ_ROOM_FIXED_GSO |
BPF_F_ADJ_ROOM_NO_CSUM_RESET) ||
@@ -593,7 +603,7 @@ static INLINING ret_t get_next_hop(buf_t *pkt, encap_headers_t *encap,
return TC_ACT_SHOT;
}
- /* Skip the remainig next hops (may be zero). */
+ /* Skip the remaining next hops (may be zero). */
return skip_next_hops(pkt, encap->unigue.hop_count -
encap->unigue.next_hop - 1);
}
@@ -603,8 +613,8 @@ static INLINING ret_t get_next_hop(buf_t *pkt, encap_headers_t *encap,
*
* fill_tuple(&t, foo, sizeof(struct iphdr), 123, 321)
*
- * clang will substitue a costant for sizeof, which allows the verifier
- * to track it's value. Based on this, it can figure out the constant
+ * clang will substitute a constant for sizeof, which allows the verifier
+ * to track its value. Based on this, it can figure out the constant
* return value, and calling code works while still being "generic" to
* IPv4 and IPv6.
*/
@@ -921,7 +931,7 @@ static INLINING verdict_t process_ipv6(buf_t *pkt, metrics_t *metrics)
}
}
-SEC("classifier/cls_redirect")
+SEC("tc")
int cls_redirect(struct __sk_buff *skb)
{
metrics_t *metrics = get_global_metrics();
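The hunks above replace the raw "#pragma clang loop unroll(full)" with __pragma_loop_unroll_full from the new bpf_compiler.h header. The header itself is not shown in this diff; a plausible sketch, assuming it only wraps the compiler-specific pragma in _Pragma() so one spelling covers both clang and GCC BPF builds, would be:

#ifdef __clang__
#define __pragma_loop_unroll_full _Pragma("clang loop unroll(full)")
#else
#define __pragma_loop_unroll_full _Pragma("GCC unroll 65534")
#endif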
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.h b/tools/testing/selftests/bpf/progs/test_cls_redirect.h
index 76eab0aacba0..eb55cb8a3dbd 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.h
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.h
@@ -10,7 +10,16 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/udp.h>
+#include <netinet/udp.h>
+
+/* offsetof() is used in static asserts, and the libbpf-redefined CO-RE
+ * friendly version breaks compilation for older clang versions <= 15
+ * when invoked in a static assert. Restore original here.
+ */
+#ifdef offsetof
+#undef offsetof
+#define offsetof(type, member) __builtin_offsetof(type, member)
+#endif
struct gre_base_hdr {
uint16_t flags;
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
new file mode 100644
index 000000000000..d0f7670351e5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+// Copyright (c) 2019, 2020 Cloudflare
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <linux/bpf.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/pkt_cls.h>
+#include <linux/tcp.h>
+#include <netinet/udp.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include "test_cls_redirect.h"
+#include "bpf_kfuncs.h"
+
+#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
+
+#define offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
+
+#define IP_OFFSET_MASK (0x1FFF)
+#define IP_MF (0x2000)
+
+char _license[] SEC("license") = "Dual BSD/GPL";
+
+/**
+ * Destination port and IP used for UDP encapsulation.
+ */
+volatile const __be16 ENCAPSULATION_PORT;
+volatile const __be32 ENCAPSULATION_IP;
+
+typedef struct {
+ uint64_t processed_packets_total;
+ uint64_t l3_protocol_packets_total_ipv4;
+ uint64_t l3_protocol_packets_total_ipv6;
+ uint64_t l4_protocol_packets_total_tcp;
+ uint64_t l4_protocol_packets_total_udp;
+ uint64_t accepted_packets_total_syn;
+ uint64_t accepted_packets_total_syn_cookies;
+ uint64_t accepted_packets_total_last_hop;
+ uint64_t accepted_packets_total_icmp_echo_request;
+ uint64_t accepted_packets_total_established;
+ uint64_t forwarded_packets_total_gue;
+ uint64_t forwarded_packets_total_gre;
+
+ uint64_t errors_total_unknown_l3_proto;
+ uint64_t errors_total_unknown_l4_proto;
+ uint64_t errors_total_malformed_ip;
+ uint64_t errors_total_fragmented_ip;
+ uint64_t errors_total_malformed_icmp;
+ uint64_t errors_total_unwanted_icmp;
+ uint64_t errors_total_malformed_icmp_pkt_too_big;
+ uint64_t errors_total_malformed_tcp;
+ uint64_t errors_total_malformed_udp;
+ uint64_t errors_total_icmp_echo_replies;
+ uint64_t errors_total_malformed_encapsulation;
+ uint64_t errors_total_encap_adjust_failed;
+ uint64_t errors_total_encap_buffer_too_small;
+ uint64_t errors_total_redirect_loop;
+ uint64_t errors_total_encap_mtu_violate;
+} metrics_t;
+
+typedef enum {
+ INVALID = 0,
+ UNKNOWN,
+ ECHO_REQUEST,
+ SYN,
+ SYN_COOKIE,
+ ESTABLISHED,
+} verdict_t;
+
+typedef struct {
+ uint16_t src, dst;
+} flow_ports_t;
+
+_Static_assert(
+ sizeof(flow_ports_t) !=
+ offsetofend(struct bpf_sock_tuple, ipv4.dport) -
+ offsetof(struct bpf_sock_tuple, ipv4.sport) - 1,
+ "flow_ports_t must match sport and dport in struct bpf_sock_tuple");
+_Static_assert(
+ sizeof(flow_ports_t) !=
+ offsetofend(struct bpf_sock_tuple, ipv6.dport) -
+ offsetof(struct bpf_sock_tuple, ipv6.sport) - 1,
+ "flow_ports_t must match sport and dport in struct bpf_sock_tuple");
+
+struct iphdr_info {
+ void *hdr;
+ __u64 len;
+};
+
+typedef int ret_t;
+
+/* This is a bit of a hack. We need a return value which allows us to
+ * indicate that the regular flow of the program should continue,
+ * while allowing functions to use XDP_PASS and XDP_DROP, etc.
+ */
+static const ret_t CONTINUE_PROCESSING = -1;
+
+/* Convenience macro to call functions which return ret_t.
+ */
+#define MAYBE_RETURN(x) \
+ do { \
+ ret_t __ret = x; \
+ if (__ret != CONTINUE_PROCESSING) \
+ return __ret; \
+ } while (0)
+
+static bool ipv4_is_fragment(const struct iphdr *ip)
+{
+ uint16_t frag_off = ip->frag_off & bpf_htons(IP_OFFSET_MASK);
+ return (ip->frag_off & bpf_htons(IP_MF)) != 0 || frag_off > 0;
+}
+
+static int pkt_parse_ipv4(struct bpf_dynptr *dynptr, __u64 *offset, struct iphdr *iphdr)
+{
+ if (bpf_dynptr_read(iphdr, sizeof(*iphdr), dynptr, *offset, 0))
+ return -1;
+
+ *offset += sizeof(*iphdr);
+
+ if (iphdr->ihl < 5)
+ return -1;
+
+ /* skip ipv4 options */
+ *offset += (iphdr->ihl - 5) * 4;
+
+ return 0;
+}
+
+/* Parse the L4 ports from a packet, assuming a layout like TCP or UDP. */
+static bool pkt_parse_icmp_l4_ports(struct bpf_dynptr *dynptr, __u64 *offset, flow_ports_t *ports)
+{
+ if (bpf_dynptr_read(ports, sizeof(*ports), dynptr, *offset, 0))
+ return false;
+
+ *offset += sizeof(*ports);
+
+ /* Ports in the L4 headers are reversed, since we are parsing an ICMP
+ * payload which is going towards the eyeball.
+ */
+ uint16_t dst = ports->src;
+ ports->src = ports->dst;
+ ports->dst = dst;
+ return true;
+}
+
+static uint16_t pkt_checksum_fold(uint32_t csum)
+{
+ /* The highest reasonable value for an IPv4 header
+ * checksum requires two folds, so we just do that always.
+ */
+ csum = (csum & 0xffff) + (csum >> 16);
+ csum = (csum & 0xffff) + (csum >> 16);
+ return (uint16_t)~csum;
+}
+
+static void pkt_ipv4_checksum(struct iphdr *iph)
+{
+ iph->check = 0;
+
+ /* An IP header without options is 20 bytes. Two of those
+ * are the checksum, which we always set to zero. Hence,
+ * the maximum accumulated value is 18 / 2 * 0xffff = 0x8fff7,
+ * which fits in 32 bit.
+ */
+ _Static_assert(sizeof(struct iphdr) == 20, "iphdr must be 20 bytes");
+ uint32_t acc = 0;
+ uint16_t *ipw = (uint16_t *)iph;
+
+ for (size_t i = 0; i < sizeof(struct iphdr) / 2; i++)
+ acc += ipw[i];
+
+ iph->check = pkt_checksum_fold(acc);
+}
+
+static bool pkt_skip_ipv6_extension_headers(struct bpf_dynptr *dynptr, __u64 *offset,
+ const struct ipv6hdr *ipv6, uint8_t *upper_proto,
+ bool *is_fragment)
+{
+ /* We understand five extension headers.
+ * https://tools.ietf.org/html/rfc8200#section-4.1 states that all
+ * headers should occur once, except Destination Options, which may
+ * occur twice. Hence we give up after 6 headers.
+ */
+ struct {
+ uint8_t next;
+ uint8_t len;
+ } exthdr = {
+ .next = ipv6->nexthdr,
+ };
+ *is_fragment = false;
+
+ for (int i = 0; i < 6; i++) {
+ switch (exthdr.next) {
+ case IPPROTO_FRAGMENT:
+ *is_fragment = true;
+ /* NB: We don't check that hdrlen == 0 as per spec. */
+ /* fallthrough; */
+
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_ROUTING:
+ case IPPROTO_DSTOPTS:
+ case IPPROTO_MH:
+ if (bpf_dynptr_read(&exthdr, sizeof(exthdr), dynptr, *offset, 0))
+ return false;
+
+ /* hdrlen is in 8-octet units, and excludes the first 8 octets. */
+ *offset += (exthdr.len + 1) * 8;
+
+ /* Decode next header */
+ break;
+
+ default:
+ /* The next header is not one of the known extension
+ * headers, treat it as the upper layer header.
+ *
+ * This handles IPPROTO_NONE.
+ *
+ * Encapsulating Security Payload (50) and Authentication
+ * Header (51) also end up here (and will trigger an
+ * unknown proto error later). They have a custom header
+ * format and seem too esoteric to care about.
+ */
+ *upper_proto = exthdr.next;
+ return true;
+ }
+ }
+
+ /* We never found an upper layer header. */
+ return false;
+}
+
+static int pkt_parse_ipv6(struct bpf_dynptr *dynptr, __u64 *offset, struct ipv6hdr *ipv6,
+ uint8_t *proto, bool *is_fragment)
+{
+ if (bpf_dynptr_read(ipv6, sizeof(*ipv6), dynptr, *offset, 0))
+ return -1;
+
+ *offset += sizeof(*ipv6);
+
+ if (!pkt_skip_ipv6_extension_headers(dynptr, offset, ipv6, proto, is_fragment))
+ return -1;
+
+ return 0;
+}
+
+/* Global metrics, per CPU
+ */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, unsigned int);
+ __type(value, metrics_t);
+} metrics_map SEC(".maps");
+
+static metrics_t *get_global_metrics(void)
+{
+ uint64_t key = 0;
+ return bpf_map_lookup_elem(&metrics_map, &key);
+}
+
+static ret_t accept_locally(struct __sk_buff *skb, encap_headers_t *encap)
+{
+ const int payload_off =
+ sizeof(*encap) +
+ sizeof(struct in_addr) * encap->unigue.hop_count;
+ int32_t encap_overhead = payload_off - sizeof(struct ethhdr);
+
+ /* Changing the ethertype if the encapsulated packet is ipv6 */
+ if (encap->gue.proto_ctype == IPPROTO_IPV6)
+ encap->eth.h_proto = bpf_htons(ETH_P_IPV6);
+
+ if (bpf_skb_adjust_room(skb, -encap_overhead, BPF_ADJ_ROOM_MAC,
+ BPF_F_ADJ_ROOM_FIXED_GSO |
+ BPF_F_ADJ_ROOM_NO_CSUM_RESET) ||
+ bpf_csum_level(skb, BPF_CSUM_LEVEL_DEC))
+ return TC_ACT_SHOT;
+
+ return bpf_redirect(skb->ifindex, BPF_F_INGRESS);
+}
+
+static ret_t forward_with_gre(struct __sk_buff *skb, struct bpf_dynptr *dynptr,
+ encap_headers_t *encap, struct in_addr *next_hop,
+ metrics_t *metrics)
+{
+ const int payload_off =
+ sizeof(*encap) +
+ sizeof(struct in_addr) * encap->unigue.hop_count;
+ int32_t encap_overhead =
+ payload_off - sizeof(struct ethhdr) - sizeof(struct iphdr);
+ int32_t delta = sizeof(struct gre_base_hdr) - encap_overhead;
+ __u8 encap_buffer[sizeof(encap_gre_t)] = {};
+ uint16_t proto = ETH_P_IP;
+ uint32_t mtu_len = 0;
+ encap_gre_t *encap_gre;
+
+ metrics->forwarded_packets_total_gre++;
+
+ /* Loop protection: the inner packet's TTL is decremented as a safeguard
+ * against any forwarding loop. As the only interesting field is the TTL
+ * hop limit for IPv6, it is easier to use bpf_skb_load_bytes/bpf_skb_store_bytes
+ * as they handle the split packets if needed (no need for the data to be
+ * in the linear section).
+ */
+ if (encap->gue.proto_ctype == IPPROTO_IPV6) {
+ proto = ETH_P_IPV6;
+ uint8_t ttl;
+ int rc;
+
+ rc = bpf_skb_load_bytes(
+ skb, payload_off + offsetof(struct ipv6hdr, hop_limit),
+ &ttl, 1);
+ if (rc != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ if (ttl == 0) {
+ metrics->errors_total_redirect_loop++;
+ return TC_ACT_SHOT;
+ }
+
+ ttl--;
+ rc = bpf_skb_store_bytes(
+ skb, payload_off + offsetof(struct ipv6hdr, hop_limit),
+ &ttl, 1, 0);
+ if (rc != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+ } else {
+ uint8_t ttl;
+ int rc;
+
+ rc = bpf_skb_load_bytes(
+ skb, payload_off + offsetof(struct iphdr, ttl), &ttl,
+ 1);
+ if (rc != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ if (ttl == 0) {
+ metrics->errors_total_redirect_loop++;
+ return TC_ACT_SHOT;
+ }
+
+ /* IPv4 also has a checksum to patch. While the TTL is only one byte,
+ * this function only works for 2 and 4 bytes arguments (the result is
+ * the same).
+ */
+ rc = bpf_l3_csum_replace(
+ skb, payload_off + offsetof(struct iphdr, check), ttl,
+ ttl - 1, 2);
+ if (rc != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ ttl--;
+ rc = bpf_skb_store_bytes(
+ skb, payload_off + offsetof(struct iphdr, ttl), &ttl, 1,
+ 0);
+ if (rc != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+ }
+
+ if (bpf_check_mtu(skb, skb->ifindex, &mtu_len, delta, 0)) {
+ metrics->errors_total_encap_mtu_violate++;
+ return TC_ACT_SHOT;
+ }
+
+ if (bpf_skb_adjust_room(skb, delta, BPF_ADJ_ROOM_NET,
+ BPF_F_ADJ_ROOM_FIXED_GSO |
+ BPF_F_ADJ_ROOM_NO_CSUM_RESET) ||
+ bpf_csum_level(skb, BPF_CSUM_LEVEL_INC)) {
+ metrics->errors_total_encap_adjust_failed++;
+ return TC_ACT_SHOT;
+ }
+
+ if (bpf_skb_pull_data(skb, sizeof(encap_gre_t))) {
+ metrics->errors_total_encap_buffer_too_small++;
+ return TC_ACT_SHOT;
+ }
+
+ encap_gre = bpf_dynptr_slice_rdwr(dynptr, 0, encap_buffer, sizeof(encap_buffer));
+ if (!encap_gre) {
+ metrics->errors_total_encap_buffer_too_small++;
+ return TC_ACT_SHOT;
+ }
+
+ encap_gre->ip.protocol = IPPROTO_GRE;
+ encap_gre->ip.daddr = next_hop->s_addr;
+ encap_gre->ip.saddr = ENCAPSULATION_IP;
+ encap_gre->ip.tot_len =
+ bpf_htons(bpf_ntohs(encap_gre->ip.tot_len) + delta);
+ encap_gre->gre.flags = 0;
+ encap_gre->gre.protocol = bpf_htons(proto);
+ pkt_ipv4_checksum((void *)&encap_gre->ip);
+
+ if (encap_gre == encap_buffer)
+ bpf_dynptr_write(dynptr, 0, encap_buffer, sizeof(encap_buffer), 0);
+
+ return bpf_redirect(skb->ifindex, 0);
+}
+
+static ret_t forward_to_next_hop(struct __sk_buff *skb, struct bpf_dynptr *dynptr,
+ encap_headers_t *encap, struct in_addr *next_hop,
+ metrics_t *metrics)
+{
+ /* swap L2 addresses */
+ /* This assumes that packets are received from a router.
+ * So just swapping the MAC addresses here will make the packet go back to
+ * the router, which will send it to the appropriate machine.
+ */
+ unsigned char temp[ETH_ALEN];
+ memcpy(temp, encap->eth.h_dest, sizeof(temp));
+ memcpy(encap->eth.h_dest, encap->eth.h_source,
+ sizeof(encap->eth.h_dest));
+ memcpy(encap->eth.h_source, temp, sizeof(encap->eth.h_source));
+
+ if (encap->unigue.next_hop == encap->unigue.hop_count - 1 &&
+ encap->unigue.last_hop_gre) {
+ return forward_with_gre(skb, dynptr, encap, next_hop, metrics);
+ }
+
+ metrics->forwarded_packets_total_gue++;
+ uint32_t old_saddr = encap->ip.saddr;
+ encap->ip.saddr = encap->ip.daddr;
+ encap->ip.daddr = next_hop->s_addr;
+ if (encap->unigue.next_hop < encap->unigue.hop_count) {
+ encap->unigue.next_hop++;
+ }
+
+ /* Remove ip->saddr, add next_hop->s_addr */
+ const uint64_t off = offsetof(typeof(*encap), ip.check);
+ int ret = bpf_l3_csum_replace(skb, off, old_saddr, next_hop->s_addr, 4);
+ if (ret < 0) {
+ return TC_ACT_SHOT;
+ }
+
+ return bpf_redirect(skb->ifindex, 0);
+}
+
+static ret_t skip_next_hops(__u64 *offset, int n)
+{
+ switch (n) {
+ case 1:
+ *offset += sizeof(struct in_addr);
+ case 0:
+ return CONTINUE_PROCESSING;
+
+ default:
+ return TC_ACT_SHOT;
+ }
+}
+
+/* Get the next hop from the GLB header.
+ *
+ * Sets next_hop->s_addr to 0 if there are no more hops left.
+ * pkt is positioned just after the variable length GLB header
+ * iff the call is successful.
+ */
+static ret_t get_next_hop(struct bpf_dynptr *dynptr, __u64 *offset, encap_headers_t *encap,
+ struct in_addr *next_hop)
+{
+ if (encap->unigue.next_hop > encap->unigue.hop_count)
+ return TC_ACT_SHOT;
+
+ /* Skip "used" next hops. */
+ MAYBE_RETURN(skip_next_hops(offset, encap->unigue.next_hop));
+
+ if (encap->unigue.next_hop == encap->unigue.hop_count) {
+ /* No more next hops, we are at the end of the GLB header. */
+ next_hop->s_addr = 0;
+ return CONTINUE_PROCESSING;
+ }
+
+ if (bpf_dynptr_read(next_hop, sizeof(*next_hop), dynptr, *offset, 0))
+ return TC_ACT_SHOT;
+
+ *offset += sizeof(*next_hop);
+
+ /* Skip the remaining next hops (may be zero). */
+ return skip_next_hops(offset, encap->unigue.hop_count - encap->unigue.next_hop - 1);
+}
+
+/* Fill a bpf_sock_tuple to be used with the socket lookup functions.
+ * This is a kludge that lets us work around verifier limitations:
+ *
+ * fill_tuple(&t, foo, sizeof(struct iphdr), 123, 321)
+ *
+ * clang will substitute a constant for sizeof, which allows the verifier
+ * to track its value. Based on this, it can figure out the constant
+ * return value, and calling code works while still being "generic" to
+ * IPv4 and IPv6.
+ */
+static uint64_t fill_tuple(struct bpf_sock_tuple *tuple, void *iph,
+ uint64_t iphlen, uint16_t sport, uint16_t dport)
+{
+ switch (iphlen) {
+ case sizeof(struct iphdr): {
+ struct iphdr *ipv4 = (struct iphdr *)iph;
+ tuple->ipv4.daddr = ipv4->daddr;
+ tuple->ipv4.saddr = ipv4->saddr;
+ tuple->ipv4.sport = sport;
+ tuple->ipv4.dport = dport;
+ return sizeof(tuple->ipv4);
+ }
+
+ case sizeof(struct ipv6hdr): {
+ struct ipv6hdr *ipv6 = (struct ipv6hdr *)iph;
+ memcpy(&tuple->ipv6.daddr, &ipv6->daddr,
+ sizeof(tuple->ipv6.daddr));
+ memcpy(&tuple->ipv6.saddr, &ipv6->saddr,
+ sizeof(tuple->ipv6.saddr));
+ tuple->ipv6.sport = sport;
+ tuple->ipv6.dport = dport;
+ return sizeof(tuple->ipv6);
+ }
+
+ default:
+ return 0;
+ }
+}
+
+static verdict_t classify_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple,
+ uint64_t tuplen, void *iph, struct tcphdr *tcp)
+{
+ struct bpf_sock *sk =
+ bpf_skc_lookup_tcp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0);
+
+ if (sk == NULL)
+ return UNKNOWN;
+
+ if (sk->state != BPF_TCP_LISTEN) {
+ bpf_sk_release(sk);
+ return ESTABLISHED;
+ }
+
+ if (iph != NULL && tcp != NULL) {
+ /* Kludge: we've run out of arguments, but need the length of the ip header. */
+ uint64_t iphlen = sizeof(struct iphdr);
+
+ if (tuplen == sizeof(tuple->ipv6))
+ iphlen = sizeof(struct ipv6hdr);
+
+ if (bpf_tcp_check_syncookie(sk, iph, iphlen, tcp,
+ sizeof(*tcp)) == 0) {
+ bpf_sk_release(sk);
+ return SYN_COOKIE;
+ }
+ }
+
+ bpf_sk_release(sk);
+ return UNKNOWN;
+}
+
+static verdict_t classify_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, uint64_t tuplen)
+{
+ struct bpf_sock *sk =
+ bpf_sk_lookup_udp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0);
+
+ if (sk == NULL)
+ return UNKNOWN;
+
+ if (sk->state == BPF_TCP_ESTABLISHED) {
+ bpf_sk_release(sk);
+ return ESTABLISHED;
+ }
+
+ bpf_sk_release(sk);
+ return UNKNOWN;
+}
+
+static verdict_t classify_icmp(struct __sk_buff *skb, uint8_t proto, struct bpf_sock_tuple *tuple,
+ uint64_t tuplen, metrics_t *metrics)
+{
+ switch (proto) {
+ case IPPROTO_TCP:
+ return classify_tcp(skb, tuple, tuplen, NULL, NULL);
+
+ case IPPROTO_UDP:
+ return classify_udp(skb, tuple, tuplen);
+
+ default:
+ metrics->errors_total_malformed_icmp++;
+ return INVALID;
+ }
+}
+
+static verdict_t process_icmpv4(struct __sk_buff *skb, struct bpf_dynptr *dynptr, __u64 *offset,
+ metrics_t *metrics)
+{
+ struct icmphdr icmp;
+ struct iphdr ipv4;
+
+ if (bpf_dynptr_read(&icmp, sizeof(icmp), dynptr, *offset, 0)) {
+ metrics->errors_total_malformed_icmp++;
+ return INVALID;
+ }
+
+ *offset += sizeof(icmp);
+
+ /* We should never receive encapsulated echo replies. */
+ if (icmp.type == ICMP_ECHOREPLY) {
+ metrics->errors_total_icmp_echo_replies++;
+ return INVALID;
+ }
+
+ if (icmp.type == ICMP_ECHO)
+ return ECHO_REQUEST;
+
+ if (icmp.type != ICMP_DEST_UNREACH || icmp.code != ICMP_FRAG_NEEDED) {
+ metrics->errors_total_unwanted_icmp++;
+ return INVALID;
+ }
+
+ if (pkt_parse_ipv4(dynptr, offset, &ipv4)) {
+ metrics->errors_total_malformed_icmp_pkt_too_big++;
+ return INVALID;
+ }
+
+ /* The source address in the outer IP header is from the entity that
+ * originated the ICMP message. Use the original IP header to restore
+ * the correct flow tuple.
+ */
+ struct bpf_sock_tuple tuple;
+ tuple.ipv4.saddr = ipv4.daddr;
+ tuple.ipv4.daddr = ipv4.saddr;
+
+ if (!pkt_parse_icmp_l4_ports(dynptr, offset, (flow_ports_t *)&tuple.ipv4.sport)) {
+ metrics->errors_total_malformed_icmp_pkt_too_big++;
+ return INVALID;
+ }
+
+ return classify_icmp(skb, ipv4.protocol, &tuple,
+ sizeof(tuple.ipv4), metrics);
+}
+
+static verdict_t process_icmpv6(struct bpf_dynptr *dynptr, __u64 *offset, struct __sk_buff *skb,
+ metrics_t *metrics)
+{
+ struct bpf_sock_tuple tuple;
+ struct ipv6hdr ipv6;
+ struct icmp6hdr icmp6;
+ bool is_fragment;
+ uint8_t l4_proto;
+
+ if (bpf_dynptr_read(&icmp6, sizeof(icmp6), dynptr, *offset, 0)) {
+ metrics->errors_total_malformed_icmp++;
+ return INVALID;
+ }
+
+ /* We should never receive encapsulated echo replies. */
+ if (icmp6.icmp6_type == ICMPV6_ECHO_REPLY) {
+ metrics->errors_total_icmp_echo_replies++;
+ return INVALID;
+ }
+
+ if (icmp6.icmp6_type == ICMPV6_ECHO_REQUEST) {
+ return ECHO_REQUEST;
+ }
+
+ if (icmp6.icmp6_type != ICMPV6_PKT_TOOBIG) {
+ metrics->errors_total_unwanted_icmp++;
+ return INVALID;
+ }
+
+ if (pkt_parse_ipv6(dynptr, offset, &ipv6, &l4_proto, &is_fragment)) {
+ metrics->errors_total_malformed_icmp_pkt_too_big++;
+ return INVALID;
+ }
+
+ if (is_fragment) {
+ metrics->errors_total_fragmented_ip++;
+ return INVALID;
+ }
+
+ /* Swap source and dest addresses. */
+ memcpy(&tuple.ipv6.saddr, &ipv6.daddr, sizeof(tuple.ipv6.saddr));
+ memcpy(&tuple.ipv6.daddr, &ipv6.saddr, sizeof(tuple.ipv6.daddr));
+
+ if (!pkt_parse_icmp_l4_ports(dynptr, offset, (flow_ports_t *)&tuple.ipv6.sport)) {
+ metrics->errors_total_malformed_icmp_pkt_too_big++;
+ return INVALID;
+ }
+
+ return classify_icmp(skb, l4_proto, &tuple, sizeof(tuple.ipv6),
+ metrics);
+}
+
+static verdict_t process_tcp(struct bpf_dynptr *dynptr, __u64 *offset, struct __sk_buff *skb,
+ struct iphdr_info *info, metrics_t *metrics)
+{
+ struct bpf_sock_tuple tuple;
+ struct tcphdr tcp;
+ uint64_t tuplen;
+
+ metrics->l4_protocol_packets_total_tcp++;
+
+ if (bpf_dynptr_read(&tcp, sizeof(tcp), dynptr, *offset, 0)) {
+ metrics->errors_total_malformed_tcp++;
+ return INVALID;
+ }
+
+ *offset += sizeof(tcp);
+
+ if (tcp.syn)
+ return SYN;
+
+ tuplen = fill_tuple(&tuple, info->hdr, info->len, tcp.source, tcp.dest);
+ return classify_tcp(skb, &tuple, tuplen, info->hdr, &tcp);
+}
+
+static verdict_t process_udp(struct bpf_dynptr *dynptr, __u64 *offset, struct __sk_buff *skb,
+ struct iphdr_info *info, metrics_t *metrics)
+{
+ struct bpf_sock_tuple tuple;
+ struct udphdr udph;
+ uint64_t tuplen;
+
+ metrics->l4_protocol_packets_total_udp++;
+
+ if (bpf_dynptr_read(&udph, sizeof(udph), dynptr, *offset, 0)) {
+ metrics->errors_total_malformed_udp++;
+ return INVALID;
+ }
+ *offset += sizeof(udph);
+
+ tuplen = fill_tuple(&tuple, info->hdr, info->len, udph.source, udph.dest);
+ return classify_udp(skb, &tuple, tuplen);
+}
+
+static verdict_t process_ipv4(struct __sk_buff *skb, struct bpf_dynptr *dynptr,
+ __u64 *offset, metrics_t *metrics)
+{
+ struct iphdr ipv4;
+ struct iphdr_info info = {
+ .hdr = &ipv4,
+ .len = sizeof(ipv4),
+ };
+
+ metrics->l3_protocol_packets_total_ipv4++;
+
+ if (pkt_parse_ipv4(dynptr, offset, &ipv4)) {
+ metrics->errors_total_malformed_ip++;
+ return INVALID;
+ }
+
+ if (ipv4.version != 4) {
+ metrics->errors_total_malformed_ip++;
+ return INVALID;
+ }
+
+ if (ipv4_is_fragment(&ipv4)) {
+ metrics->errors_total_fragmented_ip++;
+ return INVALID;
+ }
+
+ switch (ipv4.protocol) {
+ case IPPROTO_ICMP:
+ return process_icmpv4(skb, dynptr, offset, metrics);
+
+ case IPPROTO_TCP:
+ return process_tcp(dynptr, offset, skb, &info, metrics);
+
+ case IPPROTO_UDP:
+ return process_udp(dynptr, offset, skb, &info, metrics);
+
+ default:
+ metrics->errors_total_unknown_l4_proto++;
+ return INVALID;
+ }
+}
+
+static verdict_t process_ipv6(struct __sk_buff *skb, struct bpf_dynptr *dynptr,
+ __u64 *offset, metrics_t *metrics)
+{
+ struct ipv6hdr ipv6;
+ struct iphdr_info info = {
+ .hdr = &ipv6,
+ .len = sizeof(ipv6),
+ };
+ uint8_t l4_proto;
+ bool is_fragment;
+
+ metrics->l3_protocol_packets_total_ipv6++;
+
+ if (pkt_parse_ipv6(dynptr, offset, &ipv6, &l4_proto, &is_fragment)) {
+ metrics->errors_total_malformed_ip++;
+ return INVALID;
+ }
+
+ if (ipv6.version != 6) {
+ metrics->errors_total_malformed_ip++;
+ return INVALID;
+ }
+
+ if (is_fragment) {
+ metrics->errors_total_fragmented_ip++;
+ return INVALID;
+ }
+
+ switch (l4_proto) {
+ case IPPROTO_ICMPV6:
+ return process_icmpv6(dynptr, offset, skb, metrics);
+
+ case IPPROTO_TCP:
+ return process_tcp(dynptr, offset, skb, &info, metrics);
+
+ case IPPROTO_UDP:
+ return process_udp(dynptr, offset, skb, &info, metrics);
+
+ default:
+ metrics->errors_total_unknown_l4_proto++;
+ return INVALID;
+ }
+}
+
+SEC("tc")
+int cls_redirect(struct __sk_buff *skb)
+{
+ __u8 encap_buffer[sizeof(encap_headers_t)] = {};
+ struct bpf_dynptr dynptr;
+ struct in_addr next_hop;
+ /* Tracks offset of the dynptr. This will be unnecessary once
+ * bpf_dynptr_advance() is available.
+ */
+ __u64 off = 0;
+ ret_t ret;
+
+ bpf_dynptr_from_skb(skb, 0, &dynptr);
+
+ metrics_t *metrics = get_global_metrics();
+ if (metrics == NULL)
+ return TC_ACT_SHOT;
+
+ metrics->processed_packets_total++;
+
+ /* Pass bogus packets as long as we're not sure they're
+ * destined for us.
+ */
+ if (skb->protocol != bpf_htons(ETH_P_IP))
+ return TC_ACT_OK;
+
+ encap_headers_t *encap;
+
+ /* Make sure that all encapsulation headers are available in
+ * the linear portion of the skb. This makes it easy to manipulate them.
+ */
+ if (bpf_skb_pull_data(skb, sizeof(*encap)))
+ return TC_ACT_OK;
+
+ encap = bpf_dynptr_slice_rdwr(&dynptr, 0, encap_buffer, sizeof(encap_buffer));
+ if (!encap)
+ return TC_ACT_OK;
+
+ off += sizeof(*encap);
+
+ if (encap->ip.ihl != 5)
+ /* We never have any options. */
+ return TC_ACT_OK;
+
+ if (encap->ip.daddr != ENCAPSULATION_IP ||
+ encap->ip.protocol != IPPROTO_UDP)
+ return TC_ACT_OK;
+
+ /* TODO Check UDP length? */
+ if (encap->udp.dest != ENCAPSULATION_PORT)
+ return TC_ACT_OK;
+
+ /* We now know that the packet is destined to us, we can
+ * drop bogus ones.
+ */
+ if (ipv4_is_fragment((void *)&encap->ip)) {
+ metrics->errors_total_fragmented_ip++;
+ return TC_ACT_SHOT;
+ }
+
+ if (encap->gue.variant != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ if (encap->gue.control != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ if (encap->gue.flags != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ if (encap->gue.hlen !=
+ sizeof(encap->unigue) / 4 + encap->unigue.hop_count) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ if (encap->unigue.version != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ if (encap->unigue.reserved != 0)
+ return TC_ACT_SHOT;
+
+ MAYBE_RETURN(get_next_hop(&dynptr, &off, encap, &next_hop));
+
+ if (next_hop.s_addr == 0) {
+ metrics->accepted_packets_total_last_hop++;
+ return accept_locally(skb, encap);
+ }
+
+ verdict_t verdict;
+ switch (encap->gue.proto_ctype) {
+ case IPPROTO_IPIP:
+ verdict = process_ipv4(skb, &dynptr, &off, metrics);
+ break;
+
+ case IPPROTO_IPV6:
+ verdict = process_ipv6(skb, &dynptr, &off, metrics);
+ break;
+
+ default:
+ metrics->errors_total_unknown_l3_proto++;
+ return TC_ACT_SHOT;
+ }
+
+ switch (verdict) {
+ case INVALID:
+ /* metrics have already been bumped */
+ return TC_ACT_SHOT;
+
+ case UNKNOWN:
+ return forward_to_next_hop(skb, &dynptr, encap, &next_hop, metrics);
+
+ case ECHO_REQUEST:
+ metrics->accepted_packets_total_icmp_echo_request++;
+ break;
+
+ case SYN:
+ if (encap->unigue.forward_syn) {
+ return forward_to_next_hop(skb, &dynptr, encap, &next_hop,
+ metrics);
+ }
+
+ metrics->accepted_packets_total_syn++;
+ break;
+
+ case SYN_COOKIE:
+ metrics->accepted_packets_total_syn_cookies++;
+ break;
+
+ case ESTABLISHED:
+ metrics->accepted_packets_total_established++;
+ break;
+ }
+
+ ret = accept_locally(skb, encap);
+
+ if (encap == encap_buffer)
+ bpf_dynptr_write(&dynptr, 0, encap_buffer, sizeof(encap_buffer), 0);
+
+ return ret;
+}
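The dynptr port above replaces direct packet-pointer arithmetic with bpf_dynptr_read() plus an offset that the caller advances by hand, which is why a __u64 *offset is threaded through every helper. A minimal sketch of that recurring read-then-advance pattern, matching the shape of the pkt_parse_* helpers above:

static __always_inline int dynptr_read_advance(struct bpf_dynptr *dynptr, __u64 *offset,
					       void *dst, __u32 len)
{
	if (bpf_dynptr_read(dst, len, dynptr, *offset, 0))
		return -1;	/* read would run past the end of the packet */
	*offset += len;
	return 0;
}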
diff --git a/tools/testing/selftests/bpf/progs/test_core_autosize.c b/tools/testing/selftests/bpf/progs/test_core_autosize.c
index 44f5aa2e8956..9a7829c5e4a7 100644
--- a/tools/testing/selftests/bpf/progs/test_core_autosize.c
+++ b/tools/testing/selftests/bpf/progs/test_core_autosize.c
@@ -125,6 +125,16 @@ int handle_downsize(void *ctx)
return 0;
}
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define bpf_core_read_int bpf_core_read
+#else
+#define bpf_core_read_int(dst, sz, src) ({ \
+ /* Prevent "subtraction from stack pointer prohibited" */ \
+ volatile long __off = sizeof(*dst) - (sz); \
+ bpf_core_read((char *)(dst) + __off, sz, src); \
+})
+#endif
+
SEC("raw_tp/sys_enter")
int handle_probed(void *ctx)
{
@@ -132,23 +142,23 @@ int handle_probed(void *ctx)
__u64 tmp;
tmp = 0;
- bpf_core_read(&tmp, bpf_core_field_size(in->ptr), &in->ptr);
+ bpf_core_read_int(&tmp, bpf_core_field_size(in->ptr), &in->ptr);
ptr_probed = tmp;
tmp = 0;
- bpf_core_read(&tmp, bpf_core_field_size(in->val1), &in->val1);
+ bpf_core_read_int(&tmp, bpf_core_field_size(in->val1), &in->val1);
val1_probed = tmp;
tmp = 0;
- bpf_core_read(&tmp, bpf_core_field_size(in->val2), &in->val2);
+ bpf_core_read_int(&tmp, bpf_core_field_size(in->val2), &in->val2);
val2_probed = tmp;
tmp = 0;
- bpf_core_read(&tmp, bpf_core_field_size(in->val3), &in->val3);
+ bpf_core_read_int(&tmp, bpf_core_field_size(in->val3), &in->val3);
val3_probed = tmp;
tmp = 0;
- bpf_core_read(&tmp, bpf_core_field_size(in->val4), &in->val4);
+ bpf_core_read_int(&tmp, bpf_core_field_size(in->val4), &in->val4);
val4_probed = tmp;
return 0;
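The new bpf_core_read_int() wrapper exists because bpf_core_field_size() can be smaller than the destination variable: on big-endian targets the read bytes must land in the destination's low-order end, which is what the __off adjustment does. A small illustration (not part of the patch), assuming a 4-byte field read into an 8-byte destination:

static __always_inline __u64 narrow_read_be(__u32 src)
{
	__u64 dst = 0;
	long off = sizeof(dst) - sizeof(src);	/* 4 in this example */

	__builtin_memcpy((char *)&dst + off, &src, sizeof(src));
	/* On big-endian, dst now equals src; copying to offset 0 instead
	 * would have left the value in the high-order half of dst.
	 */
	return dst;
}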
diff --git a/tools/testing/selftests/bpf/progs/test_core_extern.c b/tools/testing/selftests/bpf/progs/test_core_extern.c
index 3ac3603ad53d..a3c7c1042f35 100644
--- a/tools/testing/selftests/bpf/progs/test_core_extern.c
+++ b/tools/testing/selftests/bpf/progs/test_core_extern.c
@@ -11,6 +11,7 @@
static int (*bpf_missing_helper)(const void *arg1, int arg2) = (void *) 999;
extern int LINUX_KERNEL_VERSION __kconfig;
+extern int LINUX_UNKNOWN_VIRTUAL_EXTERN __kconfig __weak;
extern bool CONFIG_BPF_SYSCALL __kconfig; /* strong */
extern enum libbpf_tristate CONFIG_TRISTATE __kconfig __weak;
extern bool CONFIG_BOOL __kconfig __weak;
@@ -22,6 +23,7 @@ extern const char CONFIG_STR[8] __kconfig __weak;
extern uint64_t CONFIG_MISSING __kconfig __weak;
uint64_t kern_ver = -1;
+uint64_t unkn_virt_val = -1;
uint64_t bpf_syscall = -1;
uint64_t tristate_val = -1;
uint64_t bool_val = -1;
@@ -38,6 +40,7 @@ int handle_sys_enter(struct pt_regs *ctx)
int i;
kern_ver = LINUX_KERNEL_VERSION;
+ unkn_virt_val = LINUX_UNKNOWN_VIRTUAL_EXTERN;
bpf_syscall = CONFIG_BPF_SYSCALL;
tristate_val = CONFIG_TRISTATE;
bool_val = CONFIG_BOOL;
diff --git a/tools/testing/selftests/bpf/progs/test_core_read_macros.c b/tools/testing/selftests/bpf/progs/test_core_read_macros.c
new file mode 100644
index 000000000000..873d85a4739b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_read_macros.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* shuffled layout for relocatable (CO-RE) reads */
+struct callback_head___shuffled {
+ void (*func)(struct callback_head___shuffled *head);
+ struct callback_head___shuffled *next;
+};
+
+struct callback_head k_probe_in = {};
+struct callback_head___shuffled k_core_in = {};
+
+struct callback_head *u_probe_in = 0;
+struct callback_head___shuffled *u_core_in = 0;
+
+long k_probe_out = 0;
+long u_probe_out = 0;
+
+long k_core_out = 0;
+long u_core_out = 0;
+
+int my_pid = 0;
+
+SEC("raw_tracepoint/sys_enter")
+int handler(void *ctx)
+{
+ int pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (my_pid != pid)
+ return 0;
+
+ /* next pointers for the kernel address space have to be initialized from
+ * the BPF side; user-space mmapped addresses are still user-space addresses
+ */
+ k_probe_in.next = &k_probe_in;
+ __builtin_preserve_access_index(({k_core_in.next = &k_core_in;}));
+
+ k_probe_out = (long)BPF_PROBE_READ(&k_probe_in, next, next, func);
+ k_core_out = (long)BPF_CORE_READ(&k_core_in, next, next, func);
+ u_probe_out = (long)BPF_PROBE_READ_USER(u_probe_in, next, next, func);
+ u_core_out = (long)BPF_CORE_READ_USER(u_core_in, next, next, func);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c b/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
index 51b3f79df523..448403634eea 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
@@ -15,6 +15,7 @@ struct {
struct core_reloc_arrays_output {
int a2;
+ int a3;
char b123;
int c1c;
int d00d;
@@ -41,6 +42,7 @@ int test_core_arrays(void *ctx)
{
struct core_reloc_arrays *in = (void *)&data.in;
struct core_reloc_arrays_output *out = (void *)&data.out;
+ int *a;
if (CORE_READ(&out->a2, &in->a[2]))
return 1;
@@ -53,6 +55,9 @@ int test_core_arrays(void *ctx)
if (CORE_READ(&out->f01c, &in->f[0][1].c))
return 1;
+ a = __builtin_preserve_access_index(({ in->a; }));
+ out->a3 = a[0] + a[1] + a[2] + a[3];
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
index ab1e647aeb31..b86fdda2a6ea 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
@@ -42,7 +42,6 @@ int test_core_bitfields(void *ctx)
{
struct core_reloc_bitfields *in = (void *)&data.in;
struct core_reloc_bitfields_output *out = (void *)&data.out;
- uint64_t res;
out->ub1 = BPF_CORE_READ_BITFIELD_PROBED(in, ub1);
out->ub2 = BPF_CORE_READ_BITFIELD_PROBED(in, ub2);
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_enum64val.c b/tools/testing/selftests/bpf/progs/test_core_reloc_enum64val.c
new file mode 100644
index 000000000000..63147fbfae6e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_enum64val.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ char in[256];
+ char out[256];
+ bool skip;
+} data = {};
+
+enum named_unsigned_enum64 {
+ UNSIGNED_ENUM64_VAL1 = 0x1ffffffffULL,
+ UNSIGNED_ENUM64_VAL2 = 0x2ffffffffULL,
+ UNSIGNED_ENUM64_VAL3 = 0x3ffffffffULL,
+};
+
+enum named_signed_enum64 {
+ SIGNED_ENUM64_VAL1 = 0x1ffffffffLL,
+ SIGNED_ENUM64_VAL2 = -2,
+ SIGNED_ENUM64_VAL3 = 0x3ffffffffLL,
+};
+
+struct core_reloc_enum64val_output {
+ bool unsigned_val1_exists;
+ bool unsigned_val2_exists;
+ bool unsigned_val3_exists;
+ bool signed_val1_exists;
+ bool signed_val2_exists;
+ bool signed_val3_exists;
+
+ long unsigned_val1;
+ long unsigned_val2;
+ long signed_val1;
+ long signed_val2;
+};
+
+SEC("raw_tracepoint/sys_enter")
+int test_core_enum64val(void *ctx)
+{
+#if __clang_major__ >= 15
+ struct core_reloc_enum64val_output *out = (void *)&data.out;
+ enum named_unsigned_enum64 named_unsigned = 0;
+ enum named_signed_enum64 named_signed = 0;
+
+ out->unsigned_val1_exists = bpf_core_enum_value_exists(named_unsigned, UNSIGNED_ENUM64_VAL1);
+ out->unsigned_val2_exists = bpf_core_enum_value_exists(enum named_unsigned_enum64, UNSIGNED_ENUM64_VAL2);
+ out->unsigned_val3_exists = bpf_core_enum_value_exists(enum named_unsigned_enum64, UNSIGNED_ENUM64_VAL3);
+ out->signed_val1_exists = bpf_core_enum_value_exists(named_signed, SIGNED_ENUM64_VAL1);
+ out->signed_val2_exists = bpf_core_enum_value_exists(enum named_signed_enum64, SIGNED_ENUM64_VAL2);
+ out->signed_val3_exists = bpf_core_enum_value_exists(enum named_signed_enum64, SIGNED_ENUM64_VAL3);
+
+ out->unsigned_val1 = bpf_core_enum_value(named_unsigned, UNSIGNED_ENUM64_VAL1);
+ out->unsigned_val2 = bpf_core_enum_value(named_unsigned, UNSIGNED_ENUM64_VAL2);
+ out->signed_val1 = bpf_core_enum_value(named_signed, SIGNED_ENUM64_VAL1);
+ out->signed_val2 = bpf_core_enum_value(named_signed, SIGNED_ENUM64_VAL2);
+ /* NAMED_ENUM64_VAL3 value is optional */
+
+#else
+ data.skip = true;
+#endif
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c b/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c
index 7e45e2bdf6cd..5b8a75097ea3 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c
@@ -45,35 +45,34 @@ int test_core_existence(void *ctx)
struct core_reloc_existence_output *out = (void *)&data.out;
out->a_exists = bpf_core_field_exists(in->a);
- if (bpf_core_field_exists(in->a))
+ if (bpf_core_field_exists(struct core_reloc_existence, a))
out->a_value = BPF_CORE_READ(in, a);
else
out->a_value = 0xff000001u;
out->b_exists = bpf_core_field_exists(in->b);
- if (bpf_core_field_exists(in->b))
+ if (bpf_core_field_exists(struct core_reloc_existence, b))
out->b_value = BPF_CORE_READ(in, b);
else
out->b_value = 0xff000002u;
out->c_exists = bpf_core_field_exists(in->c);
- if (bpf_core_field_exists(in->c))
+ if (bpf_core_field_exists(struct core_reloc_existence, c))
out->c_value = BPF_CORE_READ(in, c);
else
out->c_value = 0xff000003u;
out->arr_exists = bpf_core_field_exists(in->arr);
- if (bpf_core_field_exists(in->arr))
+ if (bpf_core_field_exists(struct core_reloc_existence, arr))
out->arr_value = BPF_CORE_READ(in, arr[0]);
else
out->arr_value = 0xff000004u;
out->s_exists = bpf_core_field_exists(in->s);
- if (bpf_core_field_exists(in->s))
+ if (bpf_core_field_exists(struct core_reloc_existence, s))
out->s_value = BPF_CORE_READ(in, s.x);
else
out->s_value = 0xff000005u;
return 0;
}
-
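The hunk above switches the if-checks from the field-expression form to the type + member form of bpf_core_field_exists(); both spellings are accepted by libbpf's bpf_core_read.h macros. A short illustrative sketch, not part of the patch, assuming vmlinux.h types:

	struct task_struct *t = (void *)bpf_get_current_task();
	bool via_expr   = bpf_core_field_exists(t->mm);                  /* field expression */
	bool via_member = bpf_core_field_exists(struct task_struct, mm); /* type + member name */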
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
index 145028b52ad8..ee4a601dcb06 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
@@ -21,6 +21,7 @@ struct core_reloc_kernel_output {
/* we have test_progs[-flavor], so cut flavor part */
char comm[sizeof("test_progs")];
int comm_len;
+ bool local_task_struct_matches;
};
struct task_struct {
@@ -30,15 +31,29 @@ struct task_struct {
struct task_struct *group_leader;
};
+struct mm_struct___wrong {
+ int abc_whatever_should_not_exist;
+};
+
+struct task_struct___local {
+ int pid;
+ struct mm_struct___wrong *mm;
+};
+
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_kernel(void *ctx)
{
+ /* Support for the BPF_TYPE_MATCHES argument to the
+ * __builtin_preserve_type_info builtin was added at some point during
+ * development of clang 15 and it's what we require for this test.
+ */
+#if __has_builtin(__builtin_preserve_type_info) && __clang_major__ >= 15
struct task_struct *task = (void *)bpf_get_current_task();
struct core_reloc_kernel_output *out = (void *)&data.out;
uint64_t pid_tgid = bpf_get_current_pid_tgid();
- uint32_t real_tgid = (uint32_t)pid_tgid;
+ int32_t real_tgid = (int32_t)pid_tgid;
int pid, tgid;
if (data.my_pid_tgid != pid_tgid)
@@ -93,6 +108,10 @@ int test_core_kernel(void *ctx)
group_leader, group_leader, group_leader, group_leader,
comm);
+ out->local_task_struct_matches = bpf_core_type_matches(struct task_struct___local);
+#else
+ data.skip = true;
+#endif
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c b/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c
index 8b533db4a7a5..b2ded497572a 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c
@@ -42,7 +42,16 @@ struct core_reloc_mods {
core_reloc_mods_substruct_t h;
};
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
+#else
+#define CORE_READ(dst, src) ({ \
+ int __sz = sizeof(*(dst)) < sizeof(*(src)) ? sizeof(*(dst)) : \
+ sizeof(*(src)); \
+ bpf_core_read((char *)(dst) + sizeof(*(dst)) - __sz, __sz, \
+ (const char *)(src) + sizeof(*(src)) - __sz); \
+})
+#endif
SEC("raw_tracepoint/sys_enter")
int test_core_mods(void *ctx)
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_module.c b/tools/testing/selftests/bpf/progs/test_core_reloc_module.c
new file mode 100644
index 000000000000..bcb31ff92dcc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_module.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct bpf_testmod_test_read_ctx {
+ /* field order is mixed up */
+ size_t len;
+ char *buf;
+ loff_t off;
+} __attribute__((preserve_access_index));
+
+struct {
+ char in[256];
+ char out[256];
+ bool skip;
+ uint64_t my_pid_tgid;
+} data = {};
+
+struct core_reloc_module_output {
+ long long len;
+ long long off;
+ int read_ctx_sz;
+ bool read_ctx_exists;
+ bool buf_exists;
+ bool len_exists;
+ bool off_exists;
+ /* we have test_progs[-flavor], so cut flavor part */
+ char comm[sizeof("test_progs")];
+ int comm_len;
+};
+
+SEC("raw_tp/bpf_testmod_test_read")
+int BPF_PROG(test_core_module_probed,
+ struct task_struct *task,
+ struct bpf_testmod_test_read_ctx *read_ctx)
+{
+#if __has_builtin(__builtin_preserve_enum_value)
+ struct core_reloc_module_output *out = (void *)&data.out;
+ __u64 pid_tgid = bpf_get_current_pid_tgid();
+ __s32 real_tgid = (__s32)(pid_tgid >> 32);
+ __s32 real_pid = (__s32)pid_tgid;
+
+ if (data.my_pid_tgid != pid_tgid)
+ return 0;
+
+ if (BPF_CORE_READ(task, pid) != real_pid || BPF_CORE_READ(task, tgid) != real_tgid)
+ return 0;
+
+ out->len = BPF_CORE_READ(read_ctx, len);
+ out->off = BPF_CORE_READ(read_ctx, off);
+
+ out->read_ctx_sz = bpf_core_type_size(struct bpf_testmod_test_read_ctx);
+ out->read_ctx_exists = bpf_core_type_exists(struct bpf_testmod_test_read_ctx);
+ out->buf_exists = bpf_core_field_exists(read_ctx->buf);
+ out->off_exists = bpf_core_field_exists(read_ctx->off);
+ out->len_exists = bpf_core_field_exists(read_ctx->len);
+
+ out->comm_len = BPF_CORE_READ_STR_INTO(&out->comm, task, comm);
+#else
+ data.skip = true;
+#endif
+
+ return 0;
+}
+
+SEC("tp_btf/bpf_testmod_test_read")
+int BPF_PROG(test_core_module_direct,
+ struct task_struct *task,
+ struct bpf_testmod_test_read_ctx *read_ctx)
+{
+#if __has_builtin(__builtin_preserve_enum_value)
+ struct core_reloc_module_output *out = (void *)&data.out;
+ __u64 pid_tgid = bpf_get_current_pid_tgid();
+ __s32 real_tgid = (__s32)(pid_tgid >> 32);
+ __s32 real_pid = (__s32)pid_tgid;
+
+ if (data.my_pid_tgid != pid_tgid)
+ return 0;
+
+ if (task->pid != real_pid || task->tgid != real_tgid)
+ return 0;
+
+ out->len = read_ctx->len;
+ out->off = read_ctx->off;
+
+ out->read_ctx_sz = bpf_core_type_size(struct bpf_testmod_test_read_ctx);
+ out->read_ctx_exists = bpf_core_type_exists(struct bpf_testmod_test_read_ctx);
+ out->buf_exists = bpf_core_field_exists(read_ctx->buf);
+ out->off_exists = bpf_core_field_exists(read_ctx->off);
+ out->len_exists = bpf_core_field_exists(read_ctx->len);
+
+ out->comm_len = BPF_CORE_READ_STR_INTO(&out->comm, task, comm);
+#else
+ data.skip = true;
+#endif
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_size.c b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
index d7fb6cfc7891..5b686053ce42 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
@@ -15,12 +15,21 @@ struct {
struct core_reloc_size_output {
int int_sz;
+ int int_off;
int struct_sz;
+ int struct_off;
int union_sz;
+ int union_off;
int arr_sz;
+ int arr_off;
int arr_elem_sz;
+ int arr_elem_off;
int ptr_sz;
+ int ptr_off;
int enum_sz;
+ int enum_off;
+ int float_sz;
+ int float_off;
};
struct core_reloc_size {
@@ -30,6 +39,7 @@ struct core_reloc_size {
int arr_field[4];
void *ptr_field;
enum { VALUE = 123 } enum_field;
+ float float_field;
};
SEC("raw_tracepoint/sys_enter")
@@ -39,12 +49,28 @@ int test_core_size(void *ctx)
struct core_reloc_size_output *out = (void *)&data.out;
out->int_sz = bpf_core_field_size(in->int_field);
+ out->int_off = bpf_core_field_offset(in->int_field);
+
out->struct_sz = bpf_core_field_size(in->struct_field);
+ out->struct_off = bpf_core_field_offset(in->struct_field);
+
out->union_sz = bpf_core_field_size(in->union_field);
+ out->union_off = bpf_core_field_offset(in->union_field);
+
out->arr_sz = bpf_core_field_size(in->arr_field);
- out->arr_elem_sz = bpf_core_field_size(in->arr_field[0]);
- out->ptr_sz = bpf_core_field_size(in->ptr_field);
- out->enum_sz = bpf_core_field_size(in->enum_field);
+ out->arr_off = bpf_core_field_offset(in->arr_field);
+
+ out->arr_elem_sz = bpf_core_field_size(struct core_reloc_size, arr_field[1]);
+ out->arr_elem_off = bpf_core_field_offset(struct core_reloc_size, arr_field[1]);
+
+ out->ptr_sz = bpf_core_field_size(struct core_reloc_size, ptr_field);
+ out->ptr_off = bpf_core_field_offset(struct core_reloc_size, ptr_field);
+
+ out->enum_sz = bpf_core_field_size(struct core_reloc_size, enum_field);
+ out->enum_off = bpf_core_field_offset(struct core_reloc_size, enum_field);
+
+ out->float_sz = bpf_core_field_size(struct core_reloc_size, float_field);
+ out->float_off = bpf_core_field_offset(struct core_reloc_size, float_field);
return 0;
}
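The new offset relocations are typically paired with the size relocations when copying a field whose position may differ on the target kernel. A hedged sketch, not part of the patch, reusing the "in" pointer from the test above:

	char dst[64];
	__u32 off = bpf_core_field_offset(struct core_reloc_size, arr_field);
	__u32 sz  = bpf_core_field_size(struct core_reloc_size, arr_field);

	if (sz <= sizeof(dst))
		bpf_probe_read_kernel(dst, sz, (char *)in + off);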
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c b/tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c
index fb60f8195c53..2edb4df35e6e 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c
@@ -19,6 +19,14 @@ struct a_struct {
int x;
};
+struct a_complex_struct {
+ union {
+ struct a_struct *a;
+ void *b;
+ } x;
+ volatile long y;
+};
+
union a_union {
int y;
int z;
@@ -43,6 +51,7 @@ typedef int int_typedef;
typedef enum { TYPEDEF_ENUM_VAL1, TYPEDEF_ENUM_VAL2 } enum_typedef;
typedef void *void_ptr_typedef;
+typedef int *restrict restrict_ptr_typedef;
typedef int (*func_proto_typedef)(long);
@@ -50,6 +59,7 @@ typedef char arr_typedef[20];
struct core_reloc_type_based_output {
bool struct_exists;
+ bool complex_struct_exists;
bool union_exists;
bool enum_exists;
bool typedef_named_struct_exists;
@@ -58,9 +68,24 @@ struct core_reloc_type_based_output {
bool typedef_int_exists;
bool typedef_enum_exists;
bool typedef_void_ptr_exists;
+ bool typedef_restrict_ptr_exists;
bool typedef_func_proto_exists;
bool typedef_arr_exists;
+ bool struct_matches;
+ bool complex_struct_matches;
+ bool union_matches;
+ bool enum_matches;
+ bool typedef_named_struct_matches;
+ bool typedef_anon_struct_matches;
+ bool typedef_struct_ptr_matches;
+ bool typedef_int_matches;
+ bool typedef_enum_matches;
+ bool typedef_void_ptr_matches;
+ bool typedef_restrict_ptr_matches;
+ bool typedef_func_proto_matches;
+ bool typedef_arr_matches;
+
int struct_sz;
int union_sz;
int enum_sz;
@@ -77,10 +102,17 @@ struct core_reloc_type_based_output {
SEC("raw_tracepoint/sys_enter")
int test_core_type_based(void *ctx)
{
-#if __has_builtin(__builtin_preserve_type_info)
+ /* Support for the BPF_TYPE_MATCHES argument to the
+ * __builtin_preserve_type_info builtin was added at some point during
+ * development of clang 15 and it's what we require for this test. Part of it
+ * could run with merely __builtin_preserve_type_info (which could be checked
+ * separately), but we have to find an upper bound.
+ */
+#if __has_builtin(__builtin_preserve_type_info) && __clang_major__ >= 15
struct core_reloc_type_based_output *out = (void *)&data.out;
out->struct_exists = bpf_core_type_exists(struct a_struct);
+ out->complex_struct_exists = bpf_core_type_exists(struct a_complex_struct);
out->union_exists = bpf_core_type_exists(union a_union);
out->enum_exists = bpf_core_type_exists(enum an_enum);
out->typedef_named_struct_exists = bpf_core_type_exists(named_struct_typedef);
@@ -89,9 +121,24 @@ int test_core_type_based(void *ctx)
out->typedef_int_exists = bpf_core_type_exists(int_typedef);
out->typedef_enum_exists = bpf_core_type_exists(enum_typedef);
out->typedef_void_ptr_exists = bpf_core_type_exists(void_ptr_typedef);
+ out->typedef_restrict_ptr_exists = bpf_core_type_exists(restrict_ptr_typedef);
out->typedef_func_proto_exists = bpf_core_type_exists(func_proto_typedef);
out->typedef_arr_exists = bpf_core_type_exists(arr_typedef);
+ out->struct_matches = bpf_core_type_matches(struct a_struct);
+ out->complex_struct_matches = bpf_core_type_matches(struct a_complex_struct);
+ out->union_matches = bpf_core_type_matches(union a_union);
+ out->enum_matches = bpf_core_type_matches(enum an_enum);
+ out->typedef_named_struct_matches = bpf_core_type_matches(named_struct_typedef);
+ out->typedef_anon_struct_matches = bpf_core_type_matches(anon_struct_typedef);
+ out->typedef_struct_ptr_matches = bpf_core_type_matches(struct_ptr_typedef);
+ out->typedef_int_matches = bpf_core_type_matches(int_typedef);
+ out->typedef_enum_matches = bpf_core_type_matches(enum_typedef);
+ out->typedef_void_ptr_matches = bpf_core_type_matches(void_ptr_typedef);
+ out->typedef_restrict_ptr_matches = bpf_core_type_matches(restrict_ptr_typedef);
+ out->typedef_func_proto_matches = bpf_core_type_matches(func_proto_typedef);
+ out->typedef_arr_matches = bpf_core_type_matches(arr_typedef);
+
out->struct_sz = bpf_core_type_size(struct a_struct);
out->union_sz = bpf_core_type_size(union a_union);
out->enum_sz = bpf_core_type_size(enum an_enum);
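For reference, bpf_core_type_exists() only asks whether a type with the (flavor-stripped) name is present in target BTF, while bpf_core_type_matches() additionally requires every locally declared member to exist there with a compatible type, so a stale local definition can exist by name yet fail to match. An illustrative fragment, not part of the patch; the ___flavor type is hypothetical:

	struct a_struct___flavor { int x; };

	bool present    = bpf_core_type_exists(struct a_struct___flavor);
	bool compatible = bpf_core_type_matches(struct a_struct___flavor);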
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c b/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c
index 22aba3f6e344..6fc8b9d66e34 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c
@@ -80,7 +80,7 @@ int test_core_type_id(void *ctx)
* to detect whether this test has to be executed, however strange
* that might look like.
*
- * [0] https://reviews.llvm.org/D85174
+ * [0] https://github.com/llvm/llvm-project/commit/00602ee7ef0bf6c68d690a2bd729c12b95c95c99
*/
#if __has_builtin(__builtin_preserve_type_info)
struct core_reloc_type_id_output *out = (void *)&data.out;
diff --git a/tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c b/tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c
new file mode 100644
index 000000000000..4061f701ca50
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+const volatile int my_pid;
+
+bool abc1_called;
+bool abc2_called;
+bool custom1_called;
+bool custom2_called;
+bool kprobe1_called;
+bool xyz_called;
+
+SEC("abc")
+int abc1(void *ctx)
+{
+ abc1_called = true;
+ return 0;
+}
+
+SEC("abc/whatever")
+int abc2(void *ctx)
+{
+ abc2_called = true;
+ return 0;
+}
+
+SEC("custom")
+int custom1(void *ctx)
+{
+ custom1_called = true;
+ return 0;
+}
+
+SEC("custom/something")
+int custom2(void *ctx)
+{
+ custom2_called = true;
+ return 0;
+}
+
+SEC("kprobe")
+int kprobe1(void *ctx)
+{
+ kprobe1_called = true;
+ return 0;
+}
+
+SEC("xyz/blah")
+int xyz(void *ctx)
+{
+ int whatever;
+
+ /* use sleepable helper, custom handler should set sleepable flag */
+ bpf_copy_from_user(&whatever, sizeof(whatever), NULL);
+ xyz_called = true;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
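These programs only load because the userspace side of the test registers custom section handlers before opening the object; libbpf rejects unknown SEC() names otherwise. A rough userspace sketch (the program type and cookie here are illustrative, not necessarily what the test harness uses):

	LIBBPF_OPTS(libbpf_prog_handler_opts, opts, .cookie = 1);

	if (libbpf_register_prog_handler("custom", BPF_PROG_TYPE_KPROBE,
					 0 /* exp_attach_type */, &opts) < 0)
		/* handle registration error */;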
diff --git a/tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c b/tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c
new file mode 100644
index 000000000000..27c27cff6a3a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Google */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+extern const int bpf_prog_active __ksym;
+
+SEC("fentry/security_inode_getattr")
+int BPF_PROG(d_path_check_rdonly_mem, struct path *path, struct kstat *stat,
+ __u32 request_mask, unsigned int query_flags)
+{
+ void *active;
+ __u32 cpu;
+
+ cpu = bpf_get_smp_processor_id();
+ active = (void *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
+ if (active) {
+ /* FAIL here! 'active' points to readonly memory. bpf helpers
+ * that update its arguments can not write into it.
+ */
+ bpf_d_path(path, active, sizeof(int));
+ }
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_d_path_check_types.c b/tools/testing/selftests/bpf/progs/test_d_path_check_types.c
new file mode 100644
index 000000000000..7e02b7361307
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_d_path_check_types.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+extern const int bpf_prog_active __ksym;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 1 << 12);
+} ringbuf SEC(".maps");
+
+SEC("fentry/security_inode_getattr")
+int BPF_PROG(d_path_check_rdonly_mem, struct path *path, struct kstat *stat,
+ __u32 request_mask, unsigned int query_flags)
+{
+ void *active;
+ u32 cpu;
+
+ cpu = bpf_get_smp_processor_id();
+ active = (void *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
+ if (active) {
+ /* FAIL here! 'active' points to 'regular' memory. It
+ * cannot be submitted to ring buffer.
+ */
+ bpf_ringbuf_submit(active, 0);
+ }
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_deny_namespace.c b/tools/testing/selftests/bpf/progs/test_deny_namespace.c
new file mode 100644
index 000000000000..e96b901a733c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_deny_namespace.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <errno.h>
+#include <linux/capability.h>
+
+typedef struct { unsigned long long val; } kernel_cap_t;
+
+struct cred {
+ kernel_cap_t cap_effective;
+} __attribute__((preserve_access_index));
+
+char _license[] SEC("license") = "GPL";
+
+SEC("lsm.s/userns_create")
+int BPF_PROG(test_userns_create, const struct cred *cred, int ret)
+{
+ kernel_cap_t caps = cred->cap_effective;
+ __u64 cap_mask = 1ULL << CAP_SYS_ADMIN;
+
+ if (ret)
+ return 0;
+
+ ret = -EPERM;
+ if (caps.val & cap_mask)
+ return 0;
+
+ return -EPERM;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_enable_stats.c b/tools/testing/selftests/bpf/progs/test_enable_stats.c
index 01a002ade529..1705097d01d7 100644
--- a/tools/testing/selftests/bpf/progs/test_enable_stats.c
+++ b/tools/testing/selftests/bpf/progs/test_enable_stats.c
@@ -13,6 +13,6 @@ __u64 count = 0;
SEC("raw_tracepoint/sys_enter")
int test_enable_stats(void *ctx)
{
- count += 1;
+ __sync_fetch_and_add(&count, 1);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_fill_link_info.c b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
new file mode 100644
index 000000000000..fac33a14f200
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+
+extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
+extern bool CONFIG_PPC_FTRACE_OUT_OF_LINE __kconfig __weak;
+extern bool CONFIG_KPROBES_ON_FTRACE __kconfig __weak;
+extern bool CONFIG_PPC64 __kconfig __weak;
+
+/* This function is here to have CONFIG_X86_KERNEL_IBT,
+ * CONFIG_PPC_FTRACE_OUT_OF_LINE, CONFIG_KPROBES_ON_FTRACE,
+ * CONFIG_PPC6 used and added to object BTF.
+ */
+int unused(void)
+{
+ return CONFIG_X86_KERNEL_IBT ||
+ CONFIG_PPC_FTRACE_OUT_OF_LINE ||
+ CONFIG_KPROBES_ON_FTRACE ||
+ CONFIG_PPC64 ? 0 : 1;
+}
+
+SEC("kprobe")
+int BPF_PROG(kprobe_run)
+{
+ return 0;
+}
+
+SEC("uprobe")
+int BPF_PROG(uprobe_run)
+{
+ return 0;
+}
+
+SEC("tracepoint")
+int BPF_PROG(tp_run)
+{
+ return 0;
+}
+
+SEC("perf_event")
+int event_run(void *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe.multi")
+int BPF_PROG(kmulti_run)
+{
+ return 0;
+}
+
+SEC("uprobe.multi")
+int BPF_PROG(umulti_run)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_fsverity.c b/tools/testing/selftests/bpf/progs/test_fsverity.c
new file mode 100644
index 000000000000..9e0f73e8189c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_fsverity.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_kfuncs.h"
+
+char _license[] SEC("license") = "GPL";
+
+#ifndef SHA256_DIGEST_SIZE
+#define SHA256_DIGEST_SIZE 32
+#endif
+
+#define SIZEOF_STRUCT_FSVERITY_DIGEST 4 /* sizeof(struct fsverity_digest) */
+
+char expected_digest[SIZEOF_STRUCT_FSVERITY_DIGEST + SHA256_DIGEST_SIZE];
+char digest[SIZEOF_STRUCT_FSVERITY_DIGEST + SHA256_DIGEST_SIZE];
+__u32 monitored_pid;
+__u32 got_fsverity;
+__u32 digest_matches;
+
+SEC("lsm.s/file_open")
+int BPF_PROG(test_file_open, struct file *f)
+{
+ struct bpf_dynptr digest_ptr;
+ __u32 pid;
+ int ret;
+ int i;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ bpf_dynptr_from_mem(digest, sizeof(digest), 0, &digest_ptr);
+ ret = bpf_get_fsverity_digest(f, &digest_ptr);
+ if (ret < 0)
+ return 0;
+ got_fsverity = 1;
+
+ for (i = 0; i < (int)sizeof(digest); i++) {
+ if (digest[i] != expected_digest[i])
+ return 0;
+ }
+
+ digest_matches = 1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_get_xattr.c b/tools/testing/selftests/bpf/progs/test_get_xattr.c
new file mode 100644
index 000000000000..54305f4c9f2d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_get_xattr.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__u32 monitored_pid;
+__u32 found_xattr_from_file;
+__u32 found_xattr_from_dentry;
+
+static const char expected_value[] = "hello";
+char value1[32];
+char value2[32];
+
+/* Matches caller of test_get_xattr() in prog_tests/fs_kfuncs.c */
+static const char xattr_names[][64] = {
+ /* The following work. */
+ "user.kfuncs",
+ "security.bpf.xxx",
+
+ /* The following do not work. */
+ "security.bpf",
+ "security.selinux"
+};
+
+SEC("lsm.s/file_open")
+int BPF_PROG(test_file_open, struct file *f)
+{
+ struct bpf_dynptr value_ptr;
+ __u32 pid;
+ int ret, i;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ bpf_dynptr_from_mem(value1, sizeof(value1), 0, &value_ptr);
+
+ for (i = 0; i < ARRAY_SIZE(xattr_names); i++) {
+ ret = bpf_get_file_xattr(f, xattr_names[i], &value_ptr);
+ if (ret == sizeof(expected_value))
+ break;
+ }
+ if (ret != sizeof(expected_value))
+ return 0;
+ if (bpf_strncmp(value1, ret, expected_value))
+ return 0;
+ found_xattr_from_file = 1;
+ return 0;
+}
+
+SEC("lsm.s/inode_getxattr")
+int BPF_PROG(test_inode_getxattr, struct dentry *dentry, char *name)
+{
+ struct bpf_dynptr value_ptr;
+ __u32 pid;
+ int ret, i;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ bpf_dynptr_from_mem(value2, sizeof(value2), 0, &value_ptr);
+
+ for (i = 0; i < ARRAY_SIZE(xattr_names); i++) {
+ ret = bpf_get_dentry_xattr(dentry, xattr_names[i], &value_ptr);
+ if (ret == sizeof(expected_value))
+ break;
+ }
+ if (ret != sizeof(expected_value))
+ return 0;
+ if (bpf_strncmp(value2, ret, expected_value))
+ return 0;
+ found_xattr_from_dentry = 1;
+
+ /* return non-zero to fail getxattr from user space */
+ return -EINVAL;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_data.c b/tools/testing/selftests/bpf/progs/test_global_data.c
index 1319be1c54ba..719e314ef3e4 100644
--- a/tools/testing/selftests/bpf/progs/test_global_data.c
+++ b/tools/testing/selftests/bpf/progs/test_global_data.c
@@ -68,7 +68,7 @@ static struct foo struct3 = {
bpf_map_update_elem(&result_##map, &key, var, 0); \
} while (0)
-SEC("classifier/static_data_load")
+SEC("tc")
int load_static_data(struct __sk_buff *skb)
{
static const __u64 bar = ~0;
diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c
index 880260f6d536..fc69ff18880d 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func1.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func1.c
@@ -3,14 +3,15 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
-#ifndef MAX_STACK
-#define MAX_STACK (512 - 3 * 32 + 8)
-#endif
+#define MAX_STACK 260
static __attribute__ ((noinline))
int f0(int var, struct __sk_buff *skb)
{
+ asm volatile ("");
+
return skb->len;
}
@@ -19,6 +20,8 @@ int f1(struct __sk_buff *skb)
{
volatile char buf[MAX_STACK] = {};
+ __sink(buf[MAX_STACK - 1]);
+
return f0(0, skb) + skb->len;
}
@@ -27,6 +30,10 @@ int f3(int, struct __sk_buff *skb, int);
__attribute__ ((noinline))
int f2(int val, struct __sk_buff *skb)
{
+ volatile char buf[MAX_STACK] = {};
+
+ __sink(buf[MAX_STACK - 1]);
+
return f1(skb) + f3(val, skb, 1);
}
@@ -35,11 +42,14 @@ int f3(int val, struct __sk_buff *skb, int var)
{
volatile char buf[MAX_STACK] = {};
+ __sink(buf[MAX_STACK - 1]);
+
return skb->ifindex * val * var;
}
-SEC("classifier/test")
-int test_cls(struct __sk_buff *skb)
+SEC("tc")
+__failure __msg("combined stack size of 3 calls is")
+int global_func1(struct __sk_buff *skb)
{
return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4);
}
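The __sink() macro pulled in from bpf_misc.h keeps the compiler from optimizing away the otherwise-unused MAX_STACK-sized buffers, so the verifier sees the intended per-frame stack usage. Its definition is roughly along these lines (paraphrased; bpf_misc.h is authoritative):

	#define __sink(expr) asm volatile("" : "+g"(expr))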
diff --git a/tools/testing/selftests/bpf/progs/test_global_func10.c b/tools/testing/selftests/bpf/progs/test_global_func10.c
new file mode 100644
index 000000000000..09d027bd3ea8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func10.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#if !defined(__clang__)
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+
+struct Small {
+ long x;
+};
+
+struct Big {
+ long x;
+ long y;
+};
+
+__noinline int foo(const struct Big *big)
+{
+ if (!big)
+ return 0;
+
+ return bpf_get_prandom_u32() < big->y;
+}
+
+SEC("cgroup_skb/ingress")
+__failure __msg("invalid read from stack")
+int global_func10(struct __sk_buff *skb)
+{
+ const struct Small small = {.x = skb->len };
+
+ return foo((struct Big *)&small) ? 1 : 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func11.c b/tools/testing/selftests/bpf/progs/test_global_func11.c
new file mode 100644
index 000000000000..283e036dc401
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func11.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct S {
+ int x;
+};
+
+__noinline int foo(const struct S *s)
+{
+ return s ? bpf_get_prandom_u32() < s->x : 0;
+}
+
+SEC("cgroup_skb/ingress")
+__failure __msg("Caller passes invalid args into func#1")
+int global_func11(struct __sk_buff *skb)
+{
+ return foo((const void *)skb);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func12.c b/tools/testing/selftests/bpf/progs/test_global_func12.c
new file mode 100644
index 000000000000..6e03d42519a6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func12.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct S {
+ int x;
+};
+
+__noinline int foo(const struct S *s)
+{
+ return bpf_get_prandom_u32() < s->x;
+}
+
+SEC("cgroup_skb/ingress")
+__failure __msg("invalid mem access 'mem_or_null'")
+int global_func12(struct __sk_buff *skb)
+{
+ const struct S s = {.x = skb->len };
+
+ foo(&s);
+
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func13.c b/tools/testing/selftests/bpf/progs/test_global_func13.c
new file mode 100644
index 000000000000..02ea80da75b5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func13.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct S {
+ int x;
+};
+
+__noinline int foo(const struct S *s)
+{
+ if (s)
+ return bpf_get_prandom_u32() < s->x;
+
+ return 0;
+}
+
+SEC("cgroup_skb/ingress")
+__failure __msg("Caller passes invalid args into func#1")
+int global_func13(struct __sk_buff *skb)
+{
+ const struct S *s = (const struct S *)(0xbedabeda);
+
+ return foo(s);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func14.c b/tools/testing/selftests/bpf/progs/test_global_func14.c
new file mode 100644
index 000000000000..33b7d5efd7b2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func14.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct S;
+
+__noinline int foo(const struct S *s)
+{
+ if (s)
+ return bpf_get_prandom_u32() < *(const int *) s;
+
+ return 0;
+}
+
+SEC("cgroup_skb/ingress")
+__failure __msg("reference type('FWD S') size cannot be determined")
+int global_func14(struct __sk_buff *skb)
+{
+
+ return foo(NULL);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func15.c b/tools/testing/selftests/bpf/progs/test_global_func15.c
new file mode 100644
index 000000000000..201cc000b3f4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func15.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+__noinline int foo(unsigned int *v)
+{
+ if (v)
+ *v = bpf_get_prandom_u32();
+
+ return 0;
+}
+
+SEC("cgroup_skb/ingress")
+__failure __msg("At program exit the register R0 has ")
+int global_func15(struct __sk_buff *skb)
+{
+ unsigned int v = 1;
+
+ foo(&v);
+
+ return v;
+}
+
+SEC("cgroup_skb/ingress")
+__log_level(2) __flag(BPF_F_TEST_STATE_FREQ)
+__failure
+/* check that fallthrough code path marks r0 as precise */
+__msg("mark_precise: frame0: regs=r0 stack= before 2: (b7) r0 = 1")
+/* check that branch code path marks r0 as precise */
+__msg("mark_precise: frame0: regs=r0 stack= before 0: (85) call bpf_get_prandom_u32#7")
+__msg("At program exit the register R0 has ")
+__naked int global_func15_tricky_pruning(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 s> 1000 goto 1f;"
+ "r0 = 1;"
+ "1:"
+ "goto +0;" /* checkpoint */
+ /* cgroup_skb/ingress program is expected to return [0, 1]
+ * values, so branch above makes sure that in a fallthrough
+ * case we have a valid 1 stored in R0 register, but in
+ * a branch case we assign some random value to R0. So if
+ * there is something wrong with precision tracking for R0 at
+ * program exit, we might erroneously prune branch case,
+ * because R0 in fallthrough case is imprecise (and thus any
+ * value is valid from POV of verifier is_state_equal() logic)
+ */
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_common
+ );
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func16.c b/tools/testing/selftests/bpf/progs/test_global_func16.c
new file mode 100644
index 000000000000..e3e64bc472cd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func16.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+__noinline int foo(int (*arr)[10])
+{
+ if (arr)
+ return (*arr)[9];
+
+ return 0;
+}
+
+SEC("cgroup_skb/ingress")
+__success
+int global_func16(struct __sk_buff *skb)
+{
+ int array[10];
+
+ const int rv = foo(&array);
+
+ return rv ? 1 : 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func17.c b/tools/testing/selftests/bpf/progs/test_global_func17.c
new file mode 100644
index 000000000000..5de44b09e8ec
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func17.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+__noinline int foo(int *p)
+{
+ barrier_var(p);
+ return p ? (*p = 42) : 0;
+}
+
+const volatile int i;
+
+SEC("tc")
+__failure __msg("Caller passes invalid args into func#1")
+int global_func17(struct __sk_buff *skb)
+{
+ return foo((int *)&i);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func2.c b/tools/testing/selftests/bpf/progs/test_global_func2.c
index 2c18d82923a2..2beab9c3b68a 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func2.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func2.c
@@ -1,4 +1,49 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
#define MAX_STACK (512 - 3 * 32)
-#include "test_global_func1.c"
+
+static __attribute__ ((noinline))
+int f0(int var, struct __sk_buff *skb)
+{
+ return skb->len;
+}
+
+__attribute__ ((noinline))
+int f1(struct __sk_buff *skb)
+{
+ volatile char buf[MAX_STACK] = {};
+
+ __sink(buf[MAX_STACK - 1]);
+
+ return f0(0, skb) + skb->len;
+}
+
+int f3(int, struct __sk_buff *skb, int);
+
+__attribute__ ((noinline))
+int f2(int val, struct __sk_buff *skb)
+{
+ return f1(skb) + f3(val, skb, 1);
+}
+
+__attribute__ ((noinline))
+int f3(int val, struct __sk_buff *skb, int var)
+{
+ volatile char buf[MAX_STACK] = {};
+
+ __sink(buf[MAX_STACK - 1]);
+
+ return skb->ifindex * val * var;
+}
+
+SEC("tc")
+__success
+int global_func2(struct __sk_buff *skb)
+{
+ return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func3.c b/tools/testing/selftests/bpf/progs/test_global_func3.c
index 86f0ecb304fc..142b682d3c2f 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func3.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func3.c
@@ -3,6 +3,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__attribute__ ((noinline))
int f1(struct __sk_buff *skb)
@@ -46,20 +47,15 @@ int f7(struct __sk_buff *skb)
return f6(skb);
}
-#ifndef NO_FN8
__attribute__ ((noinline))
int f8(struct __sk_buff *skb)
{
return f7(skb);
}
-#endif
-SEC("classifier/test")
-int test_cls(struct __sk_buff *skb)
+SEC("tc")
+__failure __msg("the call stack of 8 frames")
+int global_func3(struct __sk_buff *skb)
{
-#ifndef NO_FN8
return f8(skb);
-#else
- return f7(skb);
-#endif
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func4.c b/tools/testing/selftests/bpf/progs/test_global_func4.c
index 610f75edf276..1733d87ad3f3 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func4.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func4.c
@@ -1,4 +1,55 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
-#define NO_FN8
-#include "test_global_func3.c"
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+__attribute__ ((noinline))
+int f1(struct __sk_buff *skb)
+{
+ return skb->len;
+}
+
+__attribute__ ((noinline))
+int f2(int val, struct __sk_buff *skb)
+{
+ return f1(skb) + val;
+}
+
+__attribute__ ((noinline))
+int f3(int val, struct __sk_buff *skb, int var)
+{
+ return f2(var, skb) + val;
+}
+
+__attribute__ ((noinline))
+int f4(struct __sk_buff *skb)
+{
+ return f3(1, skb, 2);
+}
+
+__attribute__ ((noinline))
+int f5(struct __sk_buff *skb)
+{
+ return f4(skb);
+}
+
+__attribute__ ((noinline))
+int f6(struct __sk_buff *skb)
+{
+ return f5(skb);
+}
+
+__attribute__ ((noinline))
+int f7(struct __sk_buff *skb)
+{
+ return f6(skb);
+}
+
+SEC("tc")
+__success
+int global_func4(struct __sk_buff *skb)
+{
+ return f7(skb);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func5.c b/tools/testing/selftests/bpf/progs/test_global_func5.c
index 260c25b827ef..257c0569ff98 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func5.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func5.c
@@ -3,6 +3,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__attribute__ ((noinline))
int f1(struct __sk_buff *skb)
@@ -24,8 +25,9 @@ int f3(int val, struct __sk_buff *skb)
return skb->ifindex * val;
}
-SEC("classifier/test")
-int test_cls(struct __sk_buff *skb)
+SEC("tc")
+__failure __msg("expects pointer to ctx")
+int global_func5(struct __sk_buff *skb)
{
return f1(skb) + f2(2, skb) + f3(3, skb);
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func6.c b/tools/testing/selftests/bpf/progs/test_global_func6.c
index 69e19c64e10b..46c38c8f2cf0 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func6.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func6.c
@@ -3,6 +3,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__attribute__ ((noinline))
int f1(struct __sk_buff *skb)
@@ -24,8 +25,9 @@ int f3(int val, struct __sk_buff *skb)
return skb->ifindex * val;
}
-SEC("classifier/test")
-int test_cls(struct __sk_buff *skb)
+SEC("tc")
+__failure __msg("modified ctx ptr R2")
+int global_func6(struct __sk_buff *skb)
{
return f1(skb) + f2(2, skb) + f3(3, skb);
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func7.c b/tools/testing/selftests/bpf/progs/test_global_func7.c
index 309b3f6136bd..f182febfde3c 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func7.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func7.c
@@ -3,6 +3,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__attribute__ ((noinline))
void foo(struct __sk_buff *skb)
@@ -10,8 +11,9 @@ void foo(struct __sk_buff *skb)
skb->tc_index = 0;
}
-SEC("classifier/test")
-int test_cls(struct __sk_buff *skb)
+SEC("tc")
+__failure __msg("foo() doesn't return scalar")
+int global_func7(struct __sk_buff *skb)
{
foo(skb);
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_global_func8.c b/tools/testing/selftests/bpf/progs/test_global_func8.c
new file mode 100644
index 000000000000..9b9c57fa2dd3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func8.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 Facebook */
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+__noinline int foo(struct __sk_buff *skb)
+{
+ return bpf_get_prandom_u32();
+}
+
+SEC("cgroup_skb/ingress")
+__success
+int global_func8(struct __sk_buff *skb)
+{
+ if (!foo(skb))
+ return 0;
+
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func9.c b/tools/testing/selftests/bpf/progs/test_global_func9.c
new file mode 100644
index 000000000000..1f2cb0159b8d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func9.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct S {
+ int x;
+};
+
+struct C {
+ int x;
+ int y;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct S);
+} map SEC(".maps");
+
+enum E {
+ E_ITEM
+};
+
+static int global_data_x = 100;
+static int volatile global_data_y = 500;
+
+__noinline int foo(const struct S *s)
+{
+ if (s)
+ return bpf_get_prandom_u32() < s->x;
+
+ return 0;
+}
+
+__noinline int bar(int *x)
+{
+ if (x)
+ *x &= bpf_get_prandom_u32();
+
+ return 0;
+}
+__noinline int baz(volatile int *x)
+{
+ if (x)
+ *x &= bpf_get_prandom_u32();
+
+ return 0;
+}
+
+__noinline int qux(enum E *e)
+{
+ if (e)
+ return *e;
+
+ return 0;
+}
+
+__noinline int quux(int (*arr)[10])
+{
+ if (arr)
+ return (*arr)[9];
+
+ return 0;
+}
+
+__noinline int quuz(int **p)
+{
+ if (p)
+ *p = NULL;
+
+ return 0;
+}
+
+SEC("cgroup_skb/ingress")
+__success
+int global_func9(struct __sk_buff *skb)
+{
+ int result = 0;
+
+ {
+ const struct S s = {.x = skb->len };
+
+ result |= foo(&s);
+ }
+
+ {
+ const __u32 key = 1;
+ const struct S *s = bpf_map_lookup_elem(&map, &key);
+
+ result |= foo(s);
+ }
+
+ {
+ const struct C c = {.x = skb->len, .y = skb->family };
+
+ result |= foo((const struct S *)&c);
+ }
+
+ {
+ result |= foo(NULL);
+ }
+
+ {
+ bar(&result);
+ bar(&global_data_x);
+ }
+
+ {
+ result |= baz(&global_data_y);
+ }
+
+ {
+ enum E e = E_ITEM;
+
+ result |= qux(&e);
+ }
+
+ {
+ int array[10] = {0};
+
+ result |= quux(&array);
+ }
+
+ {
+ int *p;
+
+ result |= quuz(&p);
+ }
+
+ return result ? 1 : 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func_args.c b/tools/testing/selftests/bpf/progs/test_global_func_args.c
new file mode 100644
index 000000000000..e712bf77daae
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func_args.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+
+#include <bpf/bpf_helpers.h>
+
+struct S {
+ int v;
+};
+
+struct S global_variable = {};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 7);
+ __type(key, __u32);
+ __type(value, int);
+} values SEC(".maps");
+
+static void save_value(__u32 index, int value)
+{
+ bpf_map_update_elem(&values, &index, &value, 0);
+}
+
+__noinline int foo(__u32 index, struct S *s)
+{
+ if (s) {
+ save_value(index, s->v);
+ return ++s->v;
+ }
+
+ save_value(index, 0);
+
+ return 1;
+}
+
+__noinline int bar(__u32 index, volatile struct S *s)
+{
+ if (s) {
+ save_value(index, s->v);
+ return ++s->v;
+ }
+
+ save_value(index, 0);
+
+ return 1;
+}
+
+__noinline int baz(struct S **s)
+{
+ if (s)
+ *s = 0;
+
+ return 0;
+}
+
+SEC("cgroup_skb/ingress")
+int test_cls(struct __sk_buff *skb)
+{
+ __u32 index = 0;
+
+ {
+ const int v = foo(index++, 0);
+
+ save_value(index++, v);
+ }
+
+ {
+ struct S s = { .v = 100 };
+
+ foo(index++, &s);
+ save_value(index++, s.v);
+ }
+
+ {
+ global_variable.v = 42;
+ bar(index++, &global_variable);
+ save_value(index++, global_variable.v);
+ }
+
+ {
+ struct S v, *p = &v;
+
+ baz(&p);
+ save_value(index++, !p);
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c
new file mode 100644
index 000000000000..143c8a4852bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+static long stack[256];
+
+/*
+ * KPROBE contexts
+ */
+
+__weak int kprobe_typedef_ctx_subprog(bpf_user_pt_regs_t *ctx)
+{
+ return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?kprobe")
+__success
+int kprobe_typedef_ctx(void *ctx)
+{
+ return kprobe_typedef_ctx_subprog(ctx);
+}
+
+/* s390x defines:
+ *
+ * typedef user_pt_regs bpf_user_pt_regs_t;
+ * typedef struct { ... } user_pt_regs;
+ *
+ * And so "canonical" underlying struct type is anonymous.
+ * So on s390x only valid ways to have PTR_TO_CTX argument in global subprogs
+ * are:
+ * - bpf_user_pt_regs_t *ctx (typedef);
+ * - struct bpf_user_pt_regs_t *ctx (backwards compatible struct hack);
+ * - void *ctx __arg_ctx (arg:ctx tag)
+ *
+ * Other architectures also allow using underlying struct types (e.g.,
+ * `struct pt_regs *ctx` for x86-64)
+ */
+#ifndef bpf_target_s390
+
+#define pt_regs_struct_t typeof(*(__PT_REGS_CAST((struct pt_regs *)NULL)))
+
+__weak int kprobe_struct_ctx_subprog(pt_regs_struct_t *ctx)
+{
+ return bpf_get_stack((void *)ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?kprobe")
+__success
+int kprobe_resolved_ctx(void *ctx)
+{
+ return kprobe_struct_ctx_subprog(ctx);
+}
+
+#endif
+
+/* this is current hack to make this work on old kernels */
+struct bpf_user_pt_regs_t {};
+
+__weak int kprobe_workaround_ctx_subprog(struct bpf_user_pt_regs_t *ctx)
+{
+ return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?kprobe")
+__success
+int kprobe_workaround_ctx(void *ctx)
+{
+ return kprobe_workaround_ctx_subprog(ctx);
+}
+
+/*
+ * RAW_TRACEPOINT contexts
+ */
+
+__weak int raw_tp_ctx_subprog(struct bpf_raw_tracepoint_args *ctx)
+{
+ return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?raw_tp")
+__success
+int raw_tp_ctx(void *ctx)
+{
+ return raw_tp_ctx_subprog(ctx);
+}
+
+/*
+ * RAW_TRACEPOINT_WRITABLE contexts
+ */
+
+__weak int raw_tp_writable_ctx_subprog(struct bpf_raw_tracepoint_args *ctx)
+{
+ return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?raw_tp")
+__success
+int raw_tp_writable_ctx(void *ctx)
+{
+ return raw_tp_writable_ctx_subprog(ctx);
+}
+
+/*
+ * PERF_EVENT contexts
+ */
+
+__weak int perf_event_ctx_subprog(struct bpf_perf_event_data *ctx)
+{
+ return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?perf_event")
+__success
+int perf_event_ctx(void *ctx)
+{
+ return perf_event_ctx_subprog(ctx);
+}
+
+/* this global subprog can now be called from many types of entry progs, each
+ * with a different context type
+ */
+__weak int subprog_ctx_tag(void *ctx __arg_ctx)
+{
+ return bpf_get_stack(ctx, stack, sizeof(stack), 0);
+}
+
+struct my_struct { int x; };
+
+__weak int subprog_multi_ctx_tags(void *ctx1 __arg_ctx,
+ struct my_struct *mem,
+ void *ctx2 __arg_ctx)
+{
+ if (!mem)
+ return 0;
+
+ return bpf_get_stack(ctx1, stack, sizeof(stack), 0) +
+ mem->x +
+ bpf_get_stack(ctx2, stack, sizeof(stack), 0);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+int arg_tag_ctx_raw_tp(void *ctx)
+{
+ struct my_struct x = { .x = 123 };
+
+ return subprog_ctx_tag(ctx) + subprog_multi_ctx_tags(ctx, &x, ctx);
+}
+
+SEC("?perf_event")
+__success __log_level(2)
+int arg_tag_ctx_perf(void *ctx)
+{
+ struct my_struct x = { .x = 123 };
+
+ return subprog_ctx_tag(ctx) + subprog_multi_ctx_tags(ctx, &x, ctx);
+}
+
+SEC("?kprobe")
+__success __log_level(2)
+int arg_tag_ctx_kprobe(void *ctx)
+{
+ struct my_struct x = { .x = 123 };
+
+ return subprog_ctx_tag(ctx) + subprog_multi_ctx_tags(ctx, &x, ctx);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
new file mode 100644
index 000000000000..ee65bad0436d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* rodata section */
+const volatile pid_t pid;
+const volatile size_t bss_array_len;
+const volatile size_t data_array_len;
+
+/* bss section */
+int sum = 0;
+int array[1];
+
+/* custom data section */
+int my_array[1] SEC(".data.custom");
+
+/* custom data section which should NOT be resizable,
+ * since it contains a single var which is not an array
+ */
+int my_int SEC(".data.non_array");
+
+/* custom data section which should NOT be resizable,
+ * since its last var is not an array
+ */
+int my_array_first[1] SEC(".data.array_not_last");
+int my_int_last SEC(".data.array_not_last");
+
+int percpu_arr[1] SEC(".data.percpu_arr");
+
+/* at least one extern is included, to ensure that a specific
+ * regression is tested whereby resizing resulted in a use-after-free
+ * bug after type information is invalidated by the resize operation.
+ *
+ * There isn't a particularly good API to test for this specific condition,
+ * but by having externs for the resizing tests it will cover this path.
+ */
+extern int LINUX_KERNEL_VERSION __kconfig;
+long version_sink;
+
+SEC("tp/syscalls/sys_enter_getpid")
+int bss_array_sum(void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ /* this will be zero, we just rely on verifier not rejecting this */
+ sum = percpu_arr[bpf_get_smp_processor_id()];
+
+ for (size_t i = 0; i < bss_array_len; ++i)
+ sum += array[i];
+
+ /* see above; ensure this is not optimized out */
+ version_sink = LINUX_KERNEL_VERSION;
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getuid")
+int data_array_sum(void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ /* this will be zero, we just rely on verifier not rejecting this */
+ sum = percpu_arr[bpf_get_smp_processor_id()];
+
+ for (size_t i = 0; i < data_array_len; ++i)
+ sum += my_array[i];
+
+ /* see above; ensure this is not optimized out */
+ version_sink = LINUX_KERNEL_VERSION;
+
+ return 0;
+}
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1)
+{
+ return 0;
+}
+
+struct bpf_testmod_ops {
+ int (*test_1)(void);
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops st_ops_resize = {
+ .test_1 = (void *)test_1
+};
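On the userspace side, these global arrays are grown before load by resizing the backing datasec maps. A hedged sketch, assuming the generated skeleton for this object (member names follow the usual skeleton conventions and are not spelled out in this patch):

	size_t n = 512; /* example element count */

	bpf_map__set_value_size(skel->maps.bss, n * sizeof(int));
	skel->rodata->bss_array_len = n;
	/* ...then load the skeleton and trigger bss_array_sum() */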
diff --git a/tools/testing/selftests/bpf/progs/test_hash_large_key.c b/tools/testing/selftests/bpf/progs/test_hash_large_key.c
new file mode 100644
index 000000000000..8b438128f46b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_hash_large_key.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 2);
+ __type(key, struct bigelement);
+ __type(value, __u32);
+} hash_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct bigelement);
+} key_map SEC(".maps");
+
+struct bigelement {
+ int a;
+ char b[4096];
+ long long c;
+};
+
+SEC("raw_tracepoint/sys_enter")
+int bpf_hash_large_key_test(void *ctx)
+{
+ int zero = 0, value = 42;
+ struct bigelement *key;
+
+ key = bpf_map_lookup_elem(&key_map, &zero);
+ if (!key)
+ return 0;
+
+ key->c = 1;
+ if (bpf_map_update_elem(&hash_map, key, &value, BPF_ANY))
+ return 0;
+
+ return 0;
+}
+
diff --git a/tools/testing/selftests/bpf/progs/test_helper_restricted.c b/tools/testing/selftests/bpf/progs/test_helper_restricted.c
new file mode 100644
index 000000000000..5715c569ec03
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_helper_restricted.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <time.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct timer {
+ struct bpf_timer t;
+};
+
+struct lock {
+ struct bpf_spin_lock l;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct timer);
+} timers SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct lock);
+} locks SEC(".maps");
+
+static int timer_cb(void *map, int *key, struct timer *timer)
+{
+ return 0;
+}
+
+static void timer_work(void)
+{
+ struct timer *timer;
+ const int key = 0;
+
+ timer = bpf_map_lookup_elem(&timers, &key);
+ if (timer) {
+ bpf_timer_init(&timer->t, &timers, CLOCK_MONOTONIC);
+ bpf_timer_set_callback(&timer->t, timer_cb);
+ bpf_timer_start(&timer->t, 10E9, 0);
+ bpf_timer_cancel(&timer->t);
+ }
+}
+
+static void spin_lock_work(void)
+{
+ const int key = 0;
+ struct lock *lock;
+
+ lock = bpf_map_lookup_elem(&locks, &key);
+ if (lock) {
+ bpf_spin_lock(&lock->l);
+ bpf_spin_unlock(&lock->l);
+ }
+}
+
+SEC("?raw_tp/sys_enter")
+int raw_tp_timer(void *ctx)
+{
+ timer_work();
+
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int tp_timer(void *ctx)
+{
+ timer_work();
+
+ return 0;
+}
+
+SEC("?kprobe")
+int kprobe_timer(void *ctx)
+{
+ timer_work();
+
+ return 0;
+}
+
+SEC("?perf_event")
+int perf_event_timer(void *ctx)
+{
+ timer_work();
+
+ return 0;
+}
+
+SEC("?raw_tp/sys_enter")
+int raw_tp_spin_lock(void *ctx)
+{
+ spin_lock_work();
+
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int tp_spin_lock(void *ctx)
+{
+ spin_lock_work();
+
+ return 0;
+}
+
+SEC("?kprobe")
+int kprobe_spin_lock(void *ctx)
+{
+ spin_lock_work();
+
+ return 0;
+}
+
+SEC("?perf_event")
+int perf_event_spin_lock(void *ctx)
+{
+ spin_lock_work();
+
+ return 0;
+}
+
+const char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_jhash.h b/tools/testing/selftests/bpf/progs/test_jhash.h
index c300734d26f6..ef53559bbbdf 100644
--- a/tools/testing/selftests/bpf/progs/test_jhash.h
+++ b/tools/testing/selftests/bpf/progs/test_jhash.h
@@ -69,3 +69,34 @@ u32 jhash(const void *key, u32 length, u32 initval)
return c;
}
+
+static __always_inline u32 jhash2(const u32 *k, u32 length, u32 initval)
+{
+ u32 a, b, c;
+
+ /* Set up the internal state */
+ a = b = c = JHASH_INITVAL + (length<<2) + initval;
+
+ /* Handle most of the key */
+ while (length > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ __jhash_mix(a, b, c);
+ length -= 3;
+ k += 3;
+ }
+
+ /* Handle the last 3 u32's */
+ switch (length) {
+ case 3: c += k[2];
+ case 2: b += k[1];
+ case 1: a += k[0];
+ __jhash_final(a, b, c);
+ break;
+ case 0: /* Nothing left to add */
+ break;
+ }
+
+ return c;
+}
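Note that jhash2() takes its length in 32-bit words, not bytes, and the case fall-throughs in the final switch are intentional (classic Jenkins hash tail handling). A usage sketch, not part of the patch:

	__u32 key[4] = { 1, 2, 3, 4 };
	__u32 hash = jhash2(key, 4 /* length in u32 words */, 0 /* initval */);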
diff --git a/tools/testing/selftests/bpf/progs/test_kernel_flag.c b/tools/testing/selftests/bpf/progs/test_kernel_flag.c
new file mode 100644
index 000000000000..b45fab3be352
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_kernel_flag.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2025 Microsoft Corporation
+ *
+ * Author: Blaise Boscaccy <bboscaccy@linux.microsoft.com>
+ */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 monitored_tid;
+
+SEC("lsm.s/bpf")
+int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ __u32 tid;
+
+ tid = bpf_get_current_pid_tgid() & 0xFFFFFFFF;
+ if (!kernel || tid != monitored_tid)
+ return 0;
+ else
+ return -EINVAL;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
new file mode 100644
index 000000000000..061befb004c2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
+extern void bpf_key_put(struct bpf_key *key) __ksym;
+extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr,
+ struct bpf_dynptr *sig_ptr,
+ struct bpf_key *trusted_keyring) __ksym;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096);
+} ringbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} array_map SEC(".maps");
+
+int err, pid;
+
+char _license[] SEC("license") = "GPL";
+
+SEC("?lsm.s/bpf")
+__failure __msg("cannot pass in dynptr at an offset=-8")
+int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ unsigned long val;
+
+ return bpf_verify_pkcs7_signature((struct bpf_dynptr *)&val,
+ (struct bpf_dynptr *)&val, NULL);
+}
+
+SEC("?lsm.s/bpf")
+__failure __msg("arg#0 expected pointer to stack or const struct bpf_dynptr")
+int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ unsigned long val = 0;
+
+ return bpf_verify_pkcs7_signature((struct bpf_dynptr *)val,
+ (struct bpf_dynptr *)val, NULL);
+}
+
+SEC("lsm.s/bpf")
+int BPF_PROG(dynptr_data_null, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ struct bpf_key *trusted_keyring;
+ struct bpf_dynptr ptr;
+ __u32 *value;
+ int ret, zero = 0;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ value = bpf_map_lookup_elem(&array_map, &zero);
+ if (!value)
+ return 0;
+
+ /* Pass invalid flags. */
+ ret = bpf_dynptr_from_mem(value, sizeof(*value), ((__u64)~0ULL), &ptr);
+ if (ret != -EINVAL)
+ return 0;
+
+ trusted_keyring = bpf_lookup_system_key(0);
+ if (!trusted_keyring)
+ return 0;
+
+ err = bpf_verify_pkcs7_signature(&ptr, &ptr, trusted_keyring);
+
+ bpf_key_put(trusted_keyring);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c
new file mode 100644
index 000000000000..0ad1bf1ede8d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+SEC("tc")
+int kfunc_dynptr_nullable_test1(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_kfunc_dynptr_test(&data, NULL);
+
+ return 0;
+}
+
+SEC("tc")
+int kfunc_dynptr_nullable_test2(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_kfunc_dynptr_test(&data, &data);
+
+ return 0;
+}
+
+SEC("tc")
+__failure __msg("expected pointer to stack or const struct bpf_dynptr")
+int kfunc_dynptr_nullable_test3(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_kfunc_dynptr_test(NULL, &data);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c
new file mode 100644
index 000000000000..27109b877714
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+
+extern const int bpf_prog_active __ksym; /* int type global var. */
+
+SEC("raw_tp/sys_enter")
+int handler1(const void *ctx)
+{
+ int *active;
+ __u32 cpu;
+
+ cpu = bpf_get_smp_processor_id();
+ active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
+ if (active) {
+		/* Kernel memory obtained from bpf_{per,this}_cpu_ptr
+		 * is read-only, so this write should _not_ pass verification.
+		 */
+ /* WRITE_ONCE */
+ *(volatile int *)active = -1;
+ }
+
+ return 0;
+}
+
+__noinline int write_active(int *p)
+{
+ return p ? (*p = 42) : 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handler2(const void *ctx)
+{
+ int *active;
+
+ active = bpf_this_cpu_ptr(&bpf_prog_active);
+ write_active(active);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_module.c b/tools/testing/selftests/bpf/progs/test_ksyms_module.c
new file mode 100644
index 000000000000..0650d918c096
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ksyms_module.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+#define X_0(x)
+#define X_1(x) x X_0(x)
+#define X_2(x) x X_1(x)
+#define X_3(x) x X_2(x)
+#define X_4(x) x X_3(x)
+#define X_5(x) x X_4(x)
+#define X_6(x) x X_5(x)
+#define X_7(x) x X_6(x)
+#define X_8(x) x X_7(x)
+#define X_9(x) x X_8(x)
+#define X_10(x) x X_9(x)
+#define REPEAT_256(Y) X_2(X_10(X_10(Y))) X_5(X_10(Y)) X_6(Y)
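+/* Expansion count, for reference: X_10(Y) yields 10 copies of Y, so
+ * X_2(X_10(X_10(Y))) yields 200, X_5(X_10(Y)) yields 50 and X_6(Y)
+ * yields 6, i.e. 200 + 50 + 6 = 256 copies in total.
+ */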
+
+extern const int bpf_testmod_ksym_percpu __ksym;
+extern void bpf_testmod_test_mod_kfunc(int i) __ksym;
+extern void bpf_testmod_invalid_mod_kfunc(void) __ksym __weak;
+
+int out_bpf_testmod_ksym = 0;
+const volatile int x = 0;
+
+SEC("tc")
+int load(struct __sk_buff *skb)
+{
+	/* This will be kept by clang, but removed by the verifier. Since it
+	 * is marked as __weak, libbpf and gen_loader don't error out if its
+	 * BTF ID is not found; instead, imm and off are set to 0 for it.
+	 */
+ if (x)
+ bpf_testmod_invalid_mod_kfunc();
+ bpf_testmod_test_mod_kfunc(42);
+ out_bpf_testmod_ksym = *(int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu);
+ return 0;
+}
+
+SEC("tc")
+int load_256(struct __sk_buff *skb)
+{
+ /* this will fail if kfunc doesn't reuse its own btf fd index */
+ REPEAT_256(bpf_testmod_test_mod_kfunc(42););
+ bpf_testmod_test_mod_kfunc(42);
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c
new file mode 100644
index 000000000000..d00268c91e19
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test weak ksyms.
+ *
+ * Copyright (c) 2021 Google
+ */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+
+int out__existing_typed = -1;
+__u64 out__existing_typeless = -1;
+
+__u64 out__non_existent_typeless = -1;
+__u64 out__non_existent_typed = -1;
+
+/* existing weak symbols */
+
+/* Test that existing weak symbols can be resolved. */
+extern const struct rq runqueues __ksym __weak; /* typed */
+extern const void bpf_prog_active __ksym __weak; /* typeless */
+struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
+void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;
+
+
+/* non-existent weak symbols. */
+
+/* typeless symbols, default to zero. */
+extern const void bpf_link_fops1 __ksym __weak;
+
+/* typed symbols, default to zero. */
+extern const int bpf_link_fops2 __ksym __weak;
+void invalid_kfunc(void) __ksym __weak;
+
+SEC("raw_tp/sys_enter")
+int pass_handler(const void *ctx)
+{
+ struct rq *rq;
+
+ /* tests existing symbols. */
+ rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
+ if (rq && bpf_ksym_exists(&runqueues))
+ out__existing_typed = rq->cpu;
+ out__existing_typeless = (__u64)&bpf_prog_active;
+
+ /* tests non-existent symbols. */
+ out__non_existent_typeless = (__u64)&bpf_link_fops1;
+
+ /* tests non-existent symbols. */
+ out__non_existent_typed = (__u64)&bpf_link_fops2;
+
+ if (&bpf_link_fops2) /* can't happen */
+ out__non_existent_typed = (__u64)bpf_per_cpu_ptr(&bpf_link_fops2, 0);
+
+ if (!bpf_ksym_exists(bpf_task_acquire))
+ /* dead code won't be seen by the verifier */
+ bpf_task_acquire(0);
+
+ if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc))
+ /* dead code won't be seen by the verifier */
+ bpf_testmod_test_mod_kfunc(0);
+
+ if (bpf_ksym_exists(invalid_kfunc))
+ /* dead code won't be seen by the verifier */
+ invalid_kfunc();
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_l4lb.c b/tools/testing/selftests/bpf/progs/test_l4lb.c
index 33493911d87a..c26057ec46dc 100644
--- a/tools/testing/selftests/bpf/progs/test_l4lb.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb.c
@@ -21,8 +21,6 @@
#include "test_iptunnel_common.h"
#include <bpf/bpf_endian.h>
-int _version SEC("version") = 1;
-
static inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> ((-shift) & 31));
@@ -450,7 +448,7 @@ static __always_inline int process_packet(void *data, __u64 off, void *data_end,
return bpf_redirect(ifindex, 0);
}
-SEC("l4lb-demo")
+SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
index b9e2753f4f91..c8bc0c6947aa 100644
--- a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
@@ -218,7 +218,7 @@ static __noinline bool get_packet_dst(struct real_definition **real,
if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
- return 0;
+ return false;
real_pos = bpf_map_lookup_elem(&ch_rings, &key);
if (!real_pos)
@@ -447,7 +447,7 @@ static __noinline int process_packet(void *data, __u64 off, void *data_end,
return bpf_redirect(ifindex, 0);
}
-SEC("l4lb-demo")
+SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c
new file mode 100644
index 000000000000..f997f5080748
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/pkt_cls.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <bpf/bpf_helpers.h>
+#include "test_iptunnel_common.h"
+#include <bpf/bpf_endian.h>
+
+#include "bpf_kfuncs.h"
+
+static __always_inline __u32 rol32(__u32 word, unsigned int shift)
+{
+ return (word << shift) | (word >> ((-shift) & 31));
+}
+
+/* copy paste of jhash from kernel sources to make sure llvm
+ * can compile it into a valid sequence of bpf instructions
+ */
+#define __jhash_mix(a, b, c) \
+{ \
+ a -= c; a ^= rol32(c, 4); c += b; \
+ b -= a; b ^= rol32(a, 6); a += c; \
+ c -= b; c ^= rol32(b, 8); b += a; \
+ a -= c; a ^= rol32(c, 16); c += b; \
+ b -= a; b ^= rol32(a, 19); a += c; \
+ c -= b; c ^= rol32(b, 4); b += a; \
+}
+
+#define __jhash_final(a, b, c) \
+{ \
+ c ^= b; c -= rol32(b, 14); \
+ a ^= c; a -= rol32(c, 11); \
+ b ^= a; b -= rol32(a, 25); \
+ c ^= b; c -= rol32(b, 16); \
+ a ^= c; a -= rol32(c, 4); \
+ b ^= a; b -= rol32(a, 14); \
+ c ^= b; c -= rol32(b, 24); \
+}
+
+#define JHASH_INITVAL 0xdeadbeef
+
+typedef unsigned int u32;
+
+static __noinline u32 jhash(const void *key, u32 length, u32 initval)
+{
+ u32 a, b, c;
+ const unsigned char *k = key;
+
+ a = b = c = JHASH_INITVAL + length + initval;
+
+ while (length > 12) {
+ a += *(u32 *)(k);
+ b += *(u32 *)(k + 4);
+ c += *(u32 *)(k + 8);
+ __jhash_mix(a, b, c);
+ length -= 12;
+ k += 12;
+ }
+ switch (length) {
+ case 12: c += (u32)k[11]<<24;
+ case 11: c += (u32)k[10]<<16;
+ case 10: c += (u32)k[9]<<8;
+ case 9: c += k[8];
+ case 8: b += (u32)k[7]<<24;
+ case 7: b += (u32)k[6]<<16;
+ case 6: b += (u32)k[5]<<8;
+ case 5: b += k[4];
+ case 4: a += (u32)k[3]<<24;
+ case 3: a += (u32)k[2]<<16;
+ case 2: a += (u32)k[1]<<8;
+ case 1: a += k[0];
+ __jhash_final(a, b, c);
+ case 0: /* Nothing left to add */
+ break;
+ }
+
+ return c;
+}
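+
+/* Example (illustrative only, not part of the test): get_packet_hash()
+ * below hashes the IPv6 source address with
+ *	jhash(pckt->srcv6, 16, MAX_VIPS)
+ * and then mixes in the ports via jhash_2words().
+ */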
+
+static __noinline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+{
+ a += initval;
+ b += initval;
+ c += initval;
+ __jhash_final(a, b, c);
+ return c;
+}
+
+static __noinline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+ return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
+}
+
+#define PCKT_FRAGMENTED 65343
+#define IPV4_HDR_LEN_NO_OPT 20
+#define IPV4_PLUS_ICMP_HDR 28
+#define IPV6_PLUS_ICMP_HDR 48
+#define RING_SIZE 2
+#define MAX_VIPS 12
+#define MAX_REALS 5
+#define CTL_MAP_SIZE 16
+#define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE)
+#define F_IPV6 (1 << 0)
+#define F_HASH_NO_SRC_PORT (1 << 0)
+#define F_ICMP (1 << 0)
+#define F_SYN_SET (1 << 1)
+
+struct packet_description {
+ union {
+ __be32 src;
+ __be32 srcv6[4];
+ };
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ union {
+ __u32 ports;
+ __u16 port16[2];
+ };
+ __u8 proto;
+ __u8 flags;
+};
+
+struct ctl_value {
+ union {
+ __u64 value;
+ __u32 ifindex;
+ __u8 mac[6];
+ };
+};
+
+struct vip_meta {
+ __u32 flags;
+ __u32 vip_num;
+};
+
+struct real_definition {
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ __u8 flags;
+};
+
+struct vip_stats {
+ __u64 bytes;
+ __u64 pkts;
+};
+
+struct eth_hdr {
+ unsigned char eth_dest[ETH_ALEN];
+ unsigned char eth_source[ETH_ALEN];
+ unsigned short eth_proto;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, MAX_VIPS);
+ __type(key, struct vip);
+ __type(value, struct vip_meta);
+} vip_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, CH_RINGS_SIZE);
+ __type(key, __u32);
+ __type(value, __u32);
+} ch_rings SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, MAX_REALS);
+ __type(key, __u32);
+ __type(value, struct real_definition);
+} reals SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, MAX_VIPS);
+ __type(key, __u32);
+ __type(value, struct vip_stats);
+} stats SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, CTL_MAP_SIZE);
+ __type(key, __u32);
+ __type(value, struct ctl_value);
+} ctl_array SEC(".maps");
+
+static __noinline __u32 get_packet_hash(struct packet_description *pckt, bool ipv6)
+{
+ if (ipv6)
+ return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS),
+ pckt->ports, CH_RINGS_SIZE);
+ else
+ return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE);
+}
+
+static __noinline bool get_packet_dst(struct real_definition **real,
+ struct packet_description *pckt,
+ struct vip_meta *vip_info,
+ bool is_ipv6)
+{
+ __u32 hash = get_packet_hash(pckt, is_ipv6);
+ __u32 key = RING_SIZE * vip_info->vip_num + hash % RING_SIZE;
+ __u32 *real_pos;
+
+ if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
+ hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
+ return false;
+
+ real_pos = bpf_map_lookup_elem(&ch_rings, &key);
+ if (!real_pos)
+ return false;
+ key = *real_pos;
+ *real = bpf_map_lookup_elem(&reals, &key);
+ if (!(*real))
+ return false;
+ return true;
+}
+
+static __noinline int parse_icmpv6(struct bpf_dynptr *skb_ptr, __u64 off,
+ struct packet_description *pckt)
+{
+ __u8 buffer[sizeof(struct ipv6hdr)] = {};
+ struct icmp6hdr *icmp_hdr;
+ struct ipv6hdr *ip6h;
+
+ icmp_hdr = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
+ if (!icmp_hdr)
+ return TC_ACT_SHOT;
+
+ if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG)
+ return TC_ACT_OK;
+ off += sizeof(struct icmp6hdr);
+ ip6h = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
+ if (!ip6h)
+ return TC_ACT_SHOT;
+ pckt->proto = ip6h->nexthdr;
+ pckt->flags |= F_ICMP;
+ memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16);
+ memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16);
+ return TC_ACT_UNSPEC;
+}
+
+static __noinline int parse_icmp(struct bpf_dynptr *skb_ptr, __u64 off,
+ struct packet_description *pckt)
+{
+ __u8 buffer_icmp[sizeof(struct iphdr)] = {};
+ __u8 buffer_ip[sizeof(struct iphdr)] = {};
+ struct icmphdr *icmp_hdr;
+ struct iphdr *iph;
+
+ icmp_hdr = bpf_dynptr_slice(skb_ptr, off, buffer_icmp, sizeof(buffer_icmp));
+ if (!icmp_hdr)
+ return TC_ACT_SHOT;
+ if (icmp_hdr->type != ICMP_DEST_UNREACH ||
+ icmp_hdr->code != ICMP_FRAG_NEEDED)
+ return TC_ACT_OK;
+ off += sizeof(struct icmphdr);
+ iph = bpf_dynptr_slice(skb_ptr, off, buffer_ip, sizeof(buffer_ip));
+ if (!iph || iph->ihl != 5)
+ return TC_ACT_SHOT;
+ pckt->proto = iph->protocol;
+ pckt->flags |= F_ICMP;
+ pckt->src = iph->daddr;
+ pckt->dst = iph->saddr;
+ return TC_ACT_UNSPEC;
+}
+
+static __noinline bool parse_udp(struct bpf_dynptr *skb_ptr, __u64 off,
+ struct packet_description *pckt)
+{
+ __u8 buffer[sizeof(struct udphdr)] = {};
+ struct udphdr *udp;
+
+ udp = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
+ if (!udp)
+ return false;
+
+ if (!(pckt->flags & F_ICMP)) {
+ pckt->port16[0] = udp->source;
+ pckt->port16[1] = udp->dest;
+ } else {
+ pckt->port16[0] = udp->dest;
+ pckt->port16[1] = udp->source;
+ }
+ return true;
+}
+
+static __noinline bool parse_tcp(struct bpf_dynptr *skb_ptr, __u64 off,
+ struct packet_description *pckt)
+{
+ __u8 buffer[sizeof(struct tcphdr)] = {};
+ struct tcphdr *tcp;
+
+ tcp = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
+ if (!tcp)
+ return false;
+
+ if (tcp->syn)
+ pckt->flags |= F_SYN_SET;
+
+ if (!(pckt->flags & F_ICMP)) {
+ pckt->port16[0] = tcp->source;
+ pckt->port16[1] = tcp->dest;
+ } else {
+ pckt->port16[0] = tcp->dest;
+ pckt->port16[1] = tcp->source;
+ }
+ return true;
+}
+
+static __noinline int process_packet(struct bpf_dynptr *skb_ptr,
+ struct eth_hdr *eth, __u64 off,
+ bool is_ipv6, struct __sk_buff *skb)
+{
+ struct packet_description pckt = {};
+ struct bpf_tunnel_key tkey = {};
+ struct vip_stats *data_stats;
+ struct real_definition *dst;
+ struct vip_meta *vip_info;
+ struct ctl_value *cval;
+ __u32 v4_intf_pos = 1;
+ __u32 v6_intf_pos = 2;
+ struct ipv6hdr *ip6h;
+ struct vip vip = {};
+ struct iphdr *iph;
+ int tun_flag = 0;
+ __u16 pkt_bytes;
+ __u64 iph_len;
+ __u32 ifindex;
+ __u8 protocol;
+ __u32 vip_num;
+ int action;
+
+ tkey.tunnel_ttl = 64;
+ if (is_ipv6) {
+ __u8 buffer[sizeof(struct ipv6hdr)] = {};
+
+ ip6h = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
+ if (!ip6h)
+ return TC_ACT_SHOT;
+
+ iph_len = sizeof(struct ipv6hdr);
+ protocol = ip6h->nexthdr;
+ pckt.proto = protocol;
+ pkt_bytes = bpf_ntohs(ip6h->payload_len);
+ off += iph_len;
+ if (protocol == IPPROTO_FRAGMENT) {
+ return TC_ACT_SHOT;
+ } else if (protocol == IPPROTO_ICMPV6) {
+ action = parse_icmpv6(skb_ptr, off, &pckt);
+ if (action >= 0)
+ return action;
+ off += IPV6_PLUS_ICMP_HDR;
+ } else {
+ memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16);
+ memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16);
+ }
+ } else {
+ __u8 buffer[sizeof(struct iphdr)] = {};
+
+ iph = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
+ if (!iph || iph->ihl != 5)
+ return TC_ACT_SHOT;
+
+ protocol = iph->protocol;
+ pckt.proto = protocol;
+ pkt_bytes = bpf_ntohs(iph->tot_len);
+ off += IPV4_HDR_LEN_NO_OPT;
+
+ if (iph->frag_off & PCKT_FRAGMENTED)
+ return TC_ACT_SHOT;
+ if (protocol == IPPROTO_ICMP) {
+ action = parse_icmp(skb_ptr, off, &pckt);
+ if (action >= 0)
+ return action;
+ off += IPV4_PLUS_ICMP_HDR;
+ } else {
+ pckt.src = iph->saddr;
+ pckt.dst = iph->daddr;
+ }
+ }
+ protocol = pckt.proto;
+
+ if (protocol == IPPROTO_TCP) {
+ if (!parse_tcp(skb_ptr, off, &pckt))
+ return TC_ACT_SHOT;
+ } else if (protocol == IPPROTO_UDP) {
+ if (!parse_udp(skb_ptr, off, &pckt))
+ return TC_ACT_SHOT;
+ } else {
+ return TC_ACT_SHOT;
+ }
+
+ if (is_ipv6)
+ memcpy(vip.daddr.v6, pckt.dstv6, 16);
+ else
+ vip.daddr.v4 = pckt.dst;
+
+ vip.dport = pckt.port16[1];
+ vip.protocol = pckt.proto;
+ vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+ if (!vip_info) {
+ vip.dport = 0;
+ vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+ if (!vip_info)
+ return TC_ACT_SHOT;
+ pckt.port16[1] = 0;
+ }
+
+ if (vip_info->flags & F_HASH_NO_SRC_PORT)
+ pckt.port16[0] = 0;
+
+ if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6))
+ return TC_ACT_SHOT;
+
+ if (dst->flags & F_IPV6) {
+ cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos);
+ if (!cval)
+ return TC_ACT_SHOT;
+ ifindex = cval->ifindex;
+ memcpy(tkey.remote_ipv6, dst->dstv6, 16);
+ tun_flag = BPF_F_TUNINFO_IPV6;
+ } else {
+ cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos);
+ if (!cval)
+ return TC_ACT_SHOT;
+ ifindex = cval->ifindex;
+ tkey.remote_ipv4 = dst->dst;
+ }
+ vip_num = vip_info->vip_num;
+ data_stats = bpf_map_lookup_elem(&stats, &vip_num);
+ if (!data_stats)
+ return TC_ACT_SHOT;
+ data_stats->pkts++;
+ data_stats->bytes += pkt_bytes;
+ bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag);
+ *(u32 *)eth->eth_dest = tkey.remote_ipv4;
+ return bpf_redirect(ifindex, 0);
+}
+
+SEC("tc")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+ __u8 buffer[sizeof(struct eth_hdr)] = {};
+ struct bpf_dynptr ptr;
+ struct eth_hdr *eth;
+ __u32 eth_proto;
+ __u32 nh_off;
+ int err;
+
+ nh_off = sizeof(struct eth_hdr);
+
+ bpf_dynptr_from_skb(ctx, 0, &ptr);
+ eth = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
+ if (!eth)
+ return TC_ACT_SHOT;
+ eth_proto = eth->eth_proto;
+ if (eth_proto == bpf_htons(ETH_P_IP))
+ err = process_packet(&ptr, eth, nh_off, false, ctx);
+ else if (eth_proto == bpf_htons(ETH_P_IPV6))
+ err = process_packet(&ptr, eth, nh_off, true, ctx);
+ else
+ return TC_ACT_SHOT;
+
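+	/* bpf_dynptr_slice_rdwr() may have copied the bytes into the local
+	 * buffer (e.g. for non-linear skb data); in that case write the
+	 * (possibly modified) header back into the packet.
+	 */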
+ if (eth == buffer)
+ bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0);
+
+ return err;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
new file mode 100644
index 000000000000..2a2a942737d7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_s390) || defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
+const volatile int skip = 0;
+#else
+const volatile int skip = 1;
+#endif
+
+volatile const short val1 = -1;
+volatile const int val2 = -1;
+short val3 = -1;
+int val4 = -1;
+int done1, done2, ret1, ret2;
+
+SEC("?raw_tp/sys_enter")
+int rdonly_map_prog(const void *ctx)
+{
+ if (done1)
+ return 0;
+
+ done1 = 1;
+ /* val1/val2 readonly map */
+ if (val1 == val2)
+ ret1 = 1;
+ return 0;
+
+}
+
+SEC("?raw_tp/sys_enter")
+int map_val_prog(const void *ctx)
+{
+ if (done2)
+ return 0;
+
+ done2 = 1;
+	/* val3/val4 regular read/write map */
+ if (val3 == val4)
+ ret2 = 1;
+ return 0;
+
+}
+
+struct bpf_testmod_struct_arg_1 {
+ int a;
+};
+
+long long int_member;
+
+SEC("?fentry/bpf_testmod_test_arg_ptr_to_struct")
+int BPF_PROG2(test_ptr_struct_arg, struct bpf_testmod_struct_arg_1 *, p)
+{
+ /* probed memory access */
+ int_member = p->a;
+ return 0;
+}
+
+long long set_optlen, set_retval;
+
+SEC("?cgroup/getsockopt")
+int _getsockopt(volatile struct bpf_sockopt *ctx)
+{
+ int old_optlen, old_retval;
+
+ old_optlen = ctx->optlen;
+ old_retval = ctx->retval;
+
+ ctx->optlen = -1;
+ ctx->retval = -1;
+
+ /* sign extension for ctx member */
+ set_optlen = ctx->optlen;
+ set_retval = ctx->retval;
+
+ ctx->optlen = old_optlen;
+ ctx->retval = old_retval;
+
+ return 0;
+}
+
+long long set_mark;
+
+SEC("?tc")
+int _tc(volatile struct __sk_buff *skb)
+{
+ long long tmp_mark;
+ int old_mark;
+
+ old_mark = skb->mark;
+
+ skb->mark = 0xf6fe;
+
+ /* narrowed sign extension for ctx member */
+#if __clang_major__ >= 18
+	/* Force a narrow one-byte signed load. Otherwise, the compiler may
+	 * generate a 32-bit unsigned load followed by an s8 movsx.
+	 */
+ asm volatile ("r1 = *(s8 *)(%[ctx] + %[off_mark])\n\t"
+ "%[tmp_mark] = r1"
+ : [tmp_mark]"=r"(tmp_mark)
+ : [ctx]"r"(skb),
+ [off_mark]"i"(offsetof(struct __sk_buff, mark)
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ + sizeof(skb->mark) - 1
+#endif
+ )
+ : "r1");
+#else
+ tmp_mark = (char)skb->mark;
+#endif
+ set_mark = tmp_mark;
+
+ skb->mark = old_mark;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_legacy_printk.c b/tools/testing/selftests/bpf/progs/test_legacy_printk.c
new file mode 100644
index 000000000000..42718cd8e6a4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_legacy_printk.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <linux/bpf.h>
+#define BPF_NO_GLOBAL_DATA
+#include <bpf/bpf_helpers.h>
+
+char LICENSE[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+} my_pid_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+} res_map SEC(".maps");
+
+volatile int my_pid_var = 0;
+volatile int res_var = 0;
+
+SEC("tp/raw_syscalls/sys_enter")
+int handle_legacy(void *ctx)
+{
+ int zero = 0, *my_pid, cur_pid, *my_res;
+
+ my_pid = bpf_map_lookup_elem(&my_pid_map, &zero);
+ if (!my_pid)
+ return 1;
+
+ cur_pid = bpf_get_current_pid_tgid() >> 32;
+ if (cur_pid != *my_pid)
+ return 1;
+
+ my_res = bpf_map_lookup_elem(&res_map, &zero);
+ if (!my_res)
+ return 1;
+
+ if (*my_res == 0)
+		/* use bpf_printk() in combination with BPF_NO_GLOBAL_DATA to
+		 * force a .rodata.str1.1 section, which previously caused
+		 * problems on old kernels because libbpf always tried to
+		 * create a global data map for it
+		 */
+ bpf_printk("Legacy-case bpf_printk test, pid %d\n", cur_pid);
+ *my_res = 1;
+
+ return *my_res;
+}
+
+SEC("tp/raw_syscalls/sys_enter")
+int handle_modern(void *ctx)
+{
+ int cur_pid;
+
+ cur_pid = bpf_get_current_pid_tgid() >> 32;
+ if (cur_pid != my_pid_var)
+ return 1;
+
+ if (res_var == 0)
+ /* we need bpf_printk() to validate libbpf logic around unused
+ * global maps and legacy kernels; see comment in handle_legacy()
+ */
+ bpf_printk("Modern-case bpf_printk test, pid %d\n", cur_pid);
+ res_var = 1;
+
+ return res_var;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
new file mode 100644
index 000000000000..568816307f71
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* From include/linux/fs.h. */
+#define FMODE_WRITE 0x2
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} data_input SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
+SEC("lsm/bpf_map")
+int BPF_PROG(check_access, struct bpf_map *map, fmode_t fmode)
+{
+ if (map != (struct bpf_map *)&data_input)
+ return 0;
+
+ if (fmode & FMODE_WRITE)
+ return -EACCES;
+ barrier();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_log_buf.c b/tools/testing/selftests/bpf/progs/test_log_buf.c
new file mode 100644
index 000000000000..199f459bd5ae
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_log_buf.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+int a[4];
+const volatile int off = 4000;
+
+SEC("raw_tp/sys_enter")
+int good_prog(const void *ctx)
+{
+ a[0] = (int)(long)ctx;
+ return a[1];
+}
+
+SEC("raw_tp/sys_enter")
+int bad_prog(const void *ctx)
+{
+ /* out of bounds access */
+ return a[off];
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_log_fixup.c b/tools/testing/selftests/bpf/progs/test_log_fixup.c
new file mode 100644
index 000000000000..1bd48feaaa42
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_log_fixup.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+struct task_struct___bad {
+ int pid;
+ int fake_field;
+ void *fake_field_subprog;
+} __attribute__((preserve_access_index));
+
+SEC("?raw_tp/sys_enter")
+int bad_relo(const void *ctx)
+{
+ static struct task_struct___bad *t;
+
+ return bpf_core_field_size(t->fake_field);
+}
+
+static __noinline int bad_subprog(void)
+{
+ static struct task_struct___bad *t;
+
+ /* ugliness below is a field offset relocation */
+ return (void *)&t->fake_field_subprog - (void *)t;
+}
+
+SEC("?raw_tp/sys_enter")
+int bad_relo_subprog(const void *ctx)
+{
+ static struct task_struct___bad *t;
+
+ return bad_subprog() + bpf_core_field_size(t->pid);
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} existing_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} missing_map SEC(".maps");
+
+SEC("?raw_tp/sys_enter")
+int use_missing_map(const void *ctx)
+{
+ int zero = 0, *value;
+
+ value = bpf_map_lookup_elem(&existing_map, &zero);
+
+ value = bpf_map_lookup_elem(&missing_map, &zero);
+
+ return value != NULL;
+}
+
+extern int bpf_nonexistent_kfunc(void) __ksym __weak;
+
+SEC("?raw_tp/sys_enter")
+int use_missing_kfunc(const void *ctx)
+{
+ bpf_nonexistent_kfunc();
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_lookup_and_delete.c b/tools/testing/selftests/bpf/progs/test_lookup_and_delete.c
new file mode 100644
index 000000000000..3a193f42c7e7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_lookup_and_delete.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+__u32 set_pid = 0;
+__u64 set_key = 0;
+__u64 set_value = 0;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 2);
+ __type(key, __u64);
+ __type(value, __u64);
+} hash_map SEC(".maps");
+
+SEC("tp/syscalls/sys_enter_getpgid")
+int bpf_lookup_and_delete_test(const void *ctx)
+{
+ if (set_pid == bpf_get_current_pid_tgid() >> 32)
+ bpf_map_update_elem(&hash_map, &set_key, &set_value, BPF_NOEXIST);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_lookup_key.c b/tools/testing/selftests/bpf/progs/test_lookup_key.c
new file mode 100644
index 000000000000..1f7e1e59b073
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_lookup_key.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 monitored_pid;
+__s32 key_serial;
+__u32 key_id;
+__u64 flags;
+
+extern struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym;
+extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
+extern void bpf_key_put(struct bpf_key *key) __ksym;
+
+SEC("lsm.s/bpf")
+int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ struct bpf_key *bkey;
+ __u32 pid;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ if (key_serial)
+ bkey = bpf_lookup_user_key(key_serial, flags);
+ else
+ bkey = bpf_lookup_system_key(key_id);
+
+ if (!bkey)
+ return -ENOENT;
+
+ bpf_key_put(bkey);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_redirect.c b/tools/testing/selftests/bpf/progs/test_lwt_redirect.c
new file mode 100644
index 000000000000..83439b87b766
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_lwt_redirect.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/ip.h>
+#include <linux/if_ether.h>
+
+/* We don't care whether the packet can be received by the network stack.
+ * We only care that the packet is sent to the correct device in the correct
+ * direction and does not panic the kernel.
+ */
+static int prepend_dummy_mac(struct __sk_buff *skb)
+{
+ char mac[] = {0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0xf,
+ 0xe, 0xd, 0xc, 0xb, 0xa, 0x08, 0x00};
+
+ if (bpf_skb_change_head(skb, ETH_HLEN, 0))
+ return -1;
+
+ if (bpf_skb_store_bytes(skb, 0, mac, sizeof(mac), 0))
+ return -1;
+
+ return 0;
+}
+
+/* Use the last byte of the destination IP address to redirect the packet */
+static int get_redirect_target(struct __sk_buff *skb)
+{
+ struct iphdr *iph = NULL;
+ void *start = (void *)(long)skb->data;
+ void *end = (void *)(long)skb->data_end;
+
+ if (start + sizeof(*iph) > end)
+ return -1;
+
+ iph = (struct iphdr *)start;
+ return bpf_ntohl(iph->daddr) & 0xff;
+}
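+
+/* E.g. a packet to 10.0.0.7 is redirected to the device with ifindex 7;
+ * a negative return value means the IP header could not be parsed.
+ */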
+
+SEC("redir_ingress")
+int test_lwt_redirect_in(struct __sk_buff *skb)
+{
+ int target = get_redirect_target(skb);
+
+ if (target < 0)
+ return BPF_OK;
+
+ if (prepend_dummy_mac(skb))
+ return BPF_DROP;
+
+ return bpf_redirect(target, BPF_F_INGRESS);
+}
+
+SEC("redir_egress")
+int test_lwt_redirect_out(struct __sk_buff *skb)
+{
+ int target = get_redirect_target(skb);
+
+ if (target < 0)
+ return BPF_OK;
+
+ if (prepend_dummy_mac(skb))
+ return BPF_DROP;
+
+ return bpf_redirect(target, 0);
+}
+
+SEC("redir_egress_nomac")
+int test_lwt_redirect_out_nomac(struct __sk_buff *skb)
+{
+ int target = get_redirect_target(skb);
+
+ if (target < 0)
+ return BPF_OK;
+
+ return bpf_redirect(target, 0);
+}
+
+SEC("redir_ingress_nomac")
+int test_lwt_redirect_in_nomac(struct __sk_buff *skb)
+{
+ int target = get_redirect_target(skb);
+
+ if (target < 0)
+ return BPF_OK;
+
+ return bpf_redirect(target, BPF_F_INGRESS);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_reroute.c b/tools/testing/selftests/bpf/progs/test_lwt_reroute.c
new file mode 100644
index 000000000000..1dc64351929c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_lwt_reroute.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <inttypes.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+
+/* This function extracts the last byte of the daddr and uses it
+ * as the output device index.
+ */
+SEC("lwt_xmit")
+int test_lwt_reroute(struct __sk_buff *skb)
+{
+ struct iphdr *iph = NULL;
+ void *start = (void *)(long)skb->data;
+ void *end = (void *)(long)skb->data_end;
+
+ /* set mark at most once */
+ if (skb->mark != 0)
+ return BPF_OK;
+
+ if (start + sizeof(*iph) > end)
+ return BPF_DROP;
+
+ iph = (struct iphdr *)start;
+ skb->mark = bpf_ntohl(iph->daddr) & 0xff;
+
+ /* do not reroute x.x.x.0 packets */
+ if (skb->mark == 0)
+ return BPF_OK;
+
+ return BPF_LWT_REROUTE;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
index 48ff2b2ad5e7..fed66f36adb6 100644
--- a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
+++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
@@ -6,6 +6,8 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
+
/* Packet parsing state machine helpers. */
#define cursor_advance(_cursor, _len) \
({ void *_tmp = _cursor; _cursor += _len; _tmp; })
@@ -131,7 +133,7 @@ int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
*pad_off = 0;
// we can only go as far as ~10 TLVs due to the BPF max stack size
- #pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < 10; i++) {
struct sr6_tlv_t tlv;
@@ -302,7 +304,7 @@ int __encap_srh(struct __sk_buff *skb)
seg = (struct ip6_addr_t *)((char *)srh + sizeof(*srh));
- #pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (unsigned long long lo = 0; lo < 4; lo++) {
seg->lo = bpf_cpu_to_be64(4 - lo);
seg->hi = bpf_cpu_to_be64(hi);
diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c
index 1cfeb940cf9f..b295f9b721bf 100644
--- a/tools/testing/selftests/bpf/progs/test_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c
@@ -9,21 +9,45 @@ struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
__uint(map_flags, 0);
- __uint(key_size, sizeof(__u32));
- /* must be sizeof(__u32) for map in map */
- __uint(value_size, sizeof(__u32));
+ __type(key, __u32);
+ __type(value, __u32);
} mim_array SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, 1);
__uint(map_flags, 0);
- __uint(key_size, sizeof(int));
- /* must be sizeof(__u32) for map in map */
- __uint(value_size, sizeof(__u32));
+ __type(key, int);
+ __type(value, __u32);
} mim_hash SEC(".maps");
-SEC("xdp_mimtest")
+/* The following three maps are used to test that a perf_event_array map
+ * can be used as an inner map of hash/array_of_maps.
+ */
+struct perf_event_array {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+} inner_map0 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __array(values, struct perf_event_array);
+} mim_array_pe SEC(".maps") = {
+ .values = {&inner_map0}};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __array(values, struct perf_event_array);
+} mim_hash_pe SEC(".maps") = {
+ .values = {&inner_map0}};
+
+SEC("xdp")
int xdp_mimtest0(struct xdp_md *ctx)
{
int value = 123;
@@ -49,5 +73,4 @@ int xdp_mimtest0(struct xdp_md *ctx)
return XDP_PASS;
}
-int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c b/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c
new file mode 100644
index 000000000000..9c7d75cf0bd6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Isovalent, Inc. */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct inner {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, int);
+ __uint(max_entries, 4);
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+	__uint(max_entries, 0); /* This will make map creation fail */
+ __type(key, __u32);
+ __array(values, struct inner);
+} mim SEC(".maps");
+
+SEC("xdp")
+int xdp_noop0(struct xdp_md *ctx)
+{
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_map_init.c b/tools/testing/selftests/bpf/progs/test_map_init.c
new file mode 100644
index 000000000000..c89d28ead673
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_map_init.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+__u64 inKey = 0;
+__u64 inValue = 0;
+__u32 inPid = 0;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(max_entries, 2);
+ __type(key, __u64);
+ __type(value, __u64);
+} hashmap1 SEC(".maps");
+
+
+SEC("tp/syscalls/sys_enter_getpgid")
+int sysenter_getpgid(const void *ctx)
+{
+	/* Just do it once, when called from our own test prog. This
+	 * ensures the map value is only updated for a single CPU.
+	 */
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (cur_pid == inPid)
+ bpf_map_update_elem(&hashmap1, &inKey, &inValue, BPF_NOEXIST);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_map_lock.c b/tools/testing/selftests/bpf/progs/test_map_lock.c
index b5c07ae7b68f..1c02511b73cd 100644
--- a/tools/testing/selftests/bpf/progs/test_map_lock.c
+++ b/tools/testing/selftests/bpf/progs/test_map_lock.c
@@ -30,10 +30,10 @@ struct {
__type(value, struct array_elem);
} array_map SEC(".maps");
-SEC("map_lock_demo")
+SEC("cgroup/skb")
int bpf_map_lock_test(struct __sk_buff *skb)
{
- struct hmap_elem zero = {}, *val;
+ struct hmap_elem *val;
int rnd = bpf_get_prandom_u32();
int key = 0, err = 1, i;
struct array_elem *q;
diff --git a/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
new file mode 100644
index 000000000000..ca827b1092da
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Bytedance */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+__u64 percpu_array_elem_sum = 0;
+__u64 percpu_hash_elem_sum = 0;
+__u64 percpu_lru_hash_elem_sum = 0;
+const volatile int nr_cpus;
+const volatile int my_pid;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} percpu_array_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u64);
+ __type(value, __u64);
+} percpu_hash_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u64);
+ __type(value, __u64);
+} percpu_lru_hash_map SEC(".maps");
+
+struct read_percpu_elem_ctx {
+ void *map;
+ __u64 sum;
+};
+
+static int read_percpu_elem_callback(__u32 index, struct read_percpu_elem_ctx *ctx)
+{
+ __u64 key = 0;
+ __u64 *value;
+
+ value = bpf_map_lookup_percpu_elem(ctx->map, &key, index);
+ if (value)
+ ctx->sum += *value;
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getuid")
+int sysenter_getuid(const void *ctx)
+{
+ struct read_percpu_elem_ctx map_ctx;
+
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
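+	/* bpf_loop() invokes the callback nr_cpus times with index
+	 * 0..nr_cpus-1, summing the map value seen on every CPU.
+	 */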
+ map_ctx.map = &percpu_array_map;
+ map_ctx.sum = 0;
+ bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+ percpu_array_elem_sum = map_ctx.sum;
+
+ map_ctx.map = &percpu_hash_map;
+ map_ctx.sum = 0;
+ bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+ percpu_hash_elem_sum = map_ctx.sum;
+
+ map_ctx.map = &percpu_lru_hash_map;
+ map_ctx.sum = 0;
+ bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+ percpu_lru_hash_elem_sum = map_ctx.sum;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_map_ops.c b/tools/testing/selftests/bpf/progs/test_map_ops.c
new file mode 100644
index 000000000000..b53b46a090c8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_map_ops.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} hash_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK);
+ __uint(max_entries, 1);
+ __type(value, int);
+} stack_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} array_map SEC(".maps");
+
+const volatile pid_t pid;
+long err = 0;
+
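+/* Minimal bpf_for_each_map_elem() callback; returning 0 continues the
+ * iteration over all elements.
+ */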
+static u64 callback(u64 map, u64 key, u64 val, u64 ctx, u64 flags)
+{
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getpid")
+int map_update(void *ctx)
+{
+ const int key = 0;
+ const int val = 1;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ err = bpf_map_update_elem(&hash_map, &key, &val, BPF_NOEXIST);
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getppid")
+int map_delete(void *ctx)
+{
+ const int key = 0;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ err = bpf_map_delete_elem(&hash_map, &key);
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getuid")
+int map_push(void *ctx)
+{
+ const int val = 1;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ err = bpf_map_push_elem(&stack_map, &val, 0);
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_geteuid")
+int map_pop(void *ctx)
+{
+ int val;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ err = bpf_map_pop_elem(&stack_map, &val);
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getgid")
+int map_peek(void *ctx)
+{
+ int val;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ err = bpf_map_peek_elem(&stack_map, &val);
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_gettid")
+int map_for_each_pass(void *ctx)
+{
+ const int key = 0;
+ const int val = 1;
+ const u64 flags = 0;
+ int callback_ctx;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ bpf_map_update_elem(&array_map, &key, &val, flags);
+
+ err = bpf_for_each_map_elem(&array_map, callback, &callback_ctx, flags);
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getpgid")
+int map_for_each_fail(void *ctx)
+{
+ const int key = 0;
+ const int val = 1;
+ const u64 flags = BPF_NOEXIST;
+ int callback_ctx;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ bpf_map_update_elem(&array_map, &key, &val, flags);
+
+	/* calling for_each with non-zero flags will return an error */
+ err = bpf_for_each_map_elem(&array_map, callback, &callback_ctx, flags);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_migrate_reuseport.c b/tools/testing/selftests/bpf/progs/test_migrate_reuseport.c
new file mode 100644
index 000000000000..27df571abf5b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_migrate_reuseport.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Check if we can migrate child sockets.
+ *
+ * 1. If reuse_md->migrating_sk is NULL (SYN packet),
+ * return SK_PASS without selecting a listener.
+ * 2. If reuse_md->migrating_sk is not NULL (socket migration),
+ * select a listener (reuseport_map[migrate_map[cookie]])
+ *
+ * Author: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
+ */
+
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
+ __uint(max_entries, 256);
+ __type(key, int);
+ __type(value, __u64);
+} reuseport_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 256);
+ __type(key, __u64);
+ __type(value, int);
+} migrate_map SEC(".maps");
+
+int migrated_at_close = 0;
+int migrated_at_close_fastopen = 0;
+int migrated_at_send_synack = 0;
+int migrated_at_recv_ack = 0;
+__be16 server_port;
+
+SEC("xdp")
+int drop_ack(struct xdp_md *xdp)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eth = data;
+ struct tcphdr *tcp = NULL;
+
+ if (eth + 1 > data_end)
+ goto pass;
+
+ switch (bpf_ntohs(eth->h_proto)) {
+ case ETH_P_IP: {
+ struct iphdr *ip = (struct iphdr *)(eth + 1);
+
+ if (ip + 1 > data_end)
+ goto pass;
+
+ if (ip->protocol != IPPROTO_TCP)
+ goto pass;
+
+ tcp = (struct tcphdr *)((void *)ip + ip->ihl * 4);
+ break;
+ }
+ case ETH_P_IPV6: {
+ struct ipv6hdr *ipv6 = (struct ipv6hdr *)(eth + 1);
+
+ if (ipv6 + 1 > data_end)
+ goto pass;
+
+ if (ipv6->nexthdr != IPPROTO_TCP)
+ goto pass;
+
+ tcp = (struct tcphdr *)(ipv6 + 1);
+ break;
+ }
+ default:
+ goto pass;
+ }
+
+ if (tcp + 1 > data_end)
+ goto pass;
+
+ if (tcp->dest != server_port)
+ goto pass;
+
+ if (!tcp->syn && tcp->ack)
+ return XDP_DROP;
+
+pass:
+ return XDP_PASS;
+}
+
+SEC("sk_reuseport/migrate")
+int migrate_reuseport(struct sk_reuseport_md *reuse_md)
+{
+ int *key, flags = 0, state, err;
+ __u64 cookie;
+
+ if (!reuse_md->migrating_sk)
+ return SK_PASS;
+
+ state = reuse_md->migrating_sk->state;
+ cookie = bpf_get_socket_cookie(reuse_md->sk);
+
+ key = bpf_map_lookup_elem(&migrate_map, &cookie);
+ if (!key)
+ return SK_DROP;
+
+ err = bpf_sk_select_reuseport(reuse_md, &reuseport_map, key, flags);
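+	/* If selecting the new listener fails, fall back to SK_PASS and let
+	 * the kernel pick a listener on its own.
+	 */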
+ if (err)
+ return SK_PASS;
+
+ switch (state) {
+ case BPF_TCP_ESTABLISHED:
+ __sync_fetch_and_add(&migrated_at_close, 1);
+ break;
+ case BPF_TCP_SYN_RECV:
+ __sync_fetch_and_add(&migrated_at_close_fastopen, 1);
+ break;
+ case BPF_TCP_NEW_SYN_RECV:
+ if (!reuse_md->len)
+ __sync_fetch_and_add(&migrated_at_send_synack, 1);
+ else
+ __sync_fetch_and_add(&migrated_at_recv_ack, 1);
+ break;
+ }
+
+ return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
index 6077a025092c..d487153a839d 100644
--- a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
+++ b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
@@ -27,6 +27,7 @@ unsigned int nr_pure_ack = 0;
unsigned int nr_data = 0;
unsigned int nr_syn = 0;
unsigned int nr_fin = 0;
+unsigned int nr_hwtstamp = 0;
/* Check the header received from the active side */
static int __check_active_hdr_in(struct bpf_sock_ops *skops, bool check_syn)
@@ -146,6 +147,9 @@ static int check_active_hdr_in(struct bpf_sock_ops *skops)
if (th->ack && !th->fin && tcp_hdrlen(th) == skops->skb_len)
nr_pure_ack++;
+ if (skops->skb_hwtstamp)
+ nr_hwtstamp++;
+
return CG_OK;
}
@@ -293,7 +297,7 @@ static int handle_passive_estab(struct bpf_sock_ops *skops)
return check_active_hdr_in(skops);
}
-SEC("sockops/misc_estab")
+SEC("sockops")
int misc_estab(struct bpf_sock_ops *skops)
{
int true_val = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_mmap.c b/tools/testing/selftests/bpf/progs/test_mmap.c
index 4eb42cff5fe9..5a5cc19a15bf 100644
--- a/tools/testing/selftests/bpf/progs/test_mmap.c
+++ b/tools/testing/selftests/bpf/progs/test_mmap.c
@@ -9,7 +9,6 @@ char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 4096);
__uint(map_flags, BPF_F_MMAPABLE | BPF_F_RDONLY_PROG);
__type(key, __u32);
__type(value, char);
@@ -17,7 +16,6 @@ struct {
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 512 * 4); /* at least 4 pages of data */
__uint(map_flags, BPF_F_MMAPABLE);
__type(key, __u32);
__type(value, __u64);
diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c
new file mode 100644
index 000000000000..03d7f89787a1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_module_attach.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "../test_kmods/bpf_testmod.h"
+
+__u32 raw_tp_read_sz = 0;
+
+SEC("raw_tp/bpf_testmod_test_read")
+int BPF_PROG(handle_raw_tp,
+ struct task_struct *task, struct bpf_testmod_test_read_ctx *read_ctx)
+{
+ raw_tp_read_sz = BPF_CORE_READ(read_ctx, len);
+ return 0;
+}
+
+__u32 raw_tp_bare_write_sz = 0;
+
+SEC("raw_tp/bpf_testmod_test_write_bare_tp")
+int BPF_PROG(handle_raw_tp_bare,
+ struct task_struct *task, struct bpf_testmod_test_write_ctx *write_ctx)
+{
+ raw_tp_bare_write_sz = BPF_CORE_READ(write_ctx, len);
+ return 0;
+}
+
+int raw_tp_writable_bare_in_val = 0;
+int raw_tp_writable_bare_early_ret = 0;
+int raw_tp_writable_bare_out_val = 0;
+
+SEC("raw_tp.w/bpf_testmod_test_writable_bare_tp")
+int BPF_PROG(handle_raw_tp_writable_bare,
+ struct bpf_testmod_test_writable_ctx *writable)
+{
+ raw_tp_writable_bare_in_val = writable->val;
+ writable->early_ret = raw_tp_writable_bare_early_ret;
+ writable->val = raw_tp_writable_bare_out_val;
+ return 0;
+}
+
+__u32 tp_btf_read_sz = 0;
+
+SEC("tp_btf/bpf_testmod_test_read")
+int BPF_PROG(handle_tp_btf,
+ struct task_struct *task, struct bpf_testmod_test_read_ctx *read_ctx)
+{
+ tp_btf_read_sz = read_ctx->len;
+ return 0;
+}
+
+__u32 fentry_read_sz = 0;
+
+SEC("fentry/bpf_testmod_test_read")
+int BPF_PROG(handle_fentry,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ fentry_read_sz = len;
+ return 0;
+}
+
+__u32 fentry_manual_read_sz = 0;
+
+SEC("fentry")
+int BPF_PROG(handle_fentry_manual,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ fentry_manual_read_sz = len;
+ return 0;
+}
+
+__u32 fentry_explicit_read_sz = 0;
+
+SEC("fentry/bpf_testmod:bpf_testmod_test_read")
+int BPF_PROG(handle_fentry_explicit,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ fentry_explicit_read_sz = len;
+ return 0;
+}
+
+
+__u32 fentry_explicit_manual_read_sz = 0;
+
+SEC("fentry")
+int BPF_PROG(handle_fentry_explicit_manual,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ fentry_explicit_manual_read_sz = len;
+ return 0;
+}
+
+__u32 fexit_read_sz = 0;
+int fexit_ret = 0;
+
+SEC("fexit/bpf_testmod_test_read")
+int BPF_PROG(handle_fexit,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len,
+ int ret)
+{
+ fexit_read_sz = len;
+ fexit_ret = ret;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_return_ptr")
+int BPF_PROG(handle_fexit_ret, int arg, struct file *ret)
+{
+ long buf = 0;
+
+ bpf_probe_read_kernel(&buf, 8, ret);
+ bpf_probe_read_kernel(&buf, 8, (char *)ret + 256);
+ *(volatile int *)ret;
+ *(volatile int *)&ret->f_mode;
+ return 0;
+}
+
+__u32 fmod_ret_read_sz = 0;
+
+SEC("fmod_ret/bpf_testmod_test_read")
+int BPF_PROG(handle_fmod_ret,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ fmod_ret_read_sz = len;
+ return 0; /* don't override the exit code */
+}
+
+SEC("kprobe.multi/bpf_testmod_test_read")
+int BPF_PROG(kprobe_multi)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_netfilter_link_attach.c b/tools/testing/selftests/bpf/progs/test_netfilter_link_attach.c
new file mode 100644
index 000000000000..03a475160abe
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_netfilter_link_attach.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+#define NF_ACCEPT 1
+
+SEC("netfilter")
+int nf_link_attach_test(struct bpf_nf_ctx *ctx)
+{
+ return NF_ACCEPT;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
index 1dca70a6de2f..386315afad65 100644
--- a/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
+++ b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
@@ -5,33 +5,48 @@
#include <stdint.h>
#include <bpf/bpf_helpers.h>
-static volatile struct {
- __u64 dev;
- __u64 ino;
- __u64 pid_tgid;
- __u64 user_pid_tgid;
-} res;
-
-SEC("raw_tracepoint/sys_enter")
-int trace(void *ctx)
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u32);
+} sock_map SEC(".maps");
+
+__u64 user_pid = 0;
+__u64 user_tgid = 0;
+__u64 dev = 0;
+__u64 ino = 0;
+
+static void get_pid_tgid(void)
{
- __u64 ns_pid_tgid, expected_pid;
struct bpf_pidns_info nsdata;
- __u32 key = 0;
- if (bpf_get_ns_current_pid_tgid(res.dev, res.ino, &nsdata,
- sizeof(struct bpf_pidns_info)))
- return 0;
+ if (bpf_get_ns_current_pid_tgid(dev, ino, &nsdata, sizeof(struct bpf_pidns_info)))
+ return;
- ns_pid_tgid = (__u64)nsdata.tgid << 32 | nsdata.pid;
- expected_pid = res.user_pid_tgid;
+ user_pid = nsdata.pid;
+ user_tgid = nsdata.tgid;
+}
- if (expected_pid != ns_pid_tgid)
- return 0;
+SEC("?tracepoint/syscalls/sys_enter_nanosleep")
+int tp_handler(const void *ctx)
+{
+ get_pid_tgid();
+ return 0;
+}
- res.pid_tgid = ns_pid_tgid;
+SEC("?cgroup/bind4")
+int cgroup_bind4(struct bpf_sock_addr *ctx)
+{
+ get_pid_tgid();
+ return 1;
+}
- return 0;
+SEC("?sk_msg")
+int sk_msg(struct sk_msg_md *msg)
+{
+ get_pid_tgid();
+ return SK_PASS;
}
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_obj_id.c b/tools/testing/selftests/bpf/progs/test_obj_id.c
index ded71b3ff6b4..2850ae788a91 100644
--- a/tools/testing/selftests/bpf/progs/test_obj_id.c
+++ b/tools/testing/selftests/bpf/progs/test_obj_id.c
@@ -4,6 +4,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
@@ -19,6 +20,7 @@ int test_obj_id(void *ctx)
__u64 *value;
value = bpf_map_lookup_elem(&test_map_id, &key);
+ __sink(value);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c
new file mode 100644
index 000000000000..d9b2ba7ac340
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* This parsing logic is taken from the open source library katran, a layer 4
+ * load balancer.
+ *
+ * The same parsing logic implemented with dynptrs can be found in test_parse_tcp_hdr_opt_dynptr.c
+ *
+ * https://github.com/facebookincubator/katran/blob/main/katran/lib/bpf/pckt_parsing.h
+ */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/tcp.h>
+#include <stdbool.h>
+#include <linux/ipv6.h>
+#include <linux/if_ether.h>
+#include "test_tcp_hdr_options.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Kind number used for experiments */
+const __u32 tcp_hdr_opt_kind_tpr = 0xFD;
+/* Length of the tcp header option */
+const __u32 tcp_hdr_opt_len_tpr = 6;
+/* maximum number of header options to check to lookup server_id */
+const __u32 tcp_hdr_opt_max_opt_checks = 15;
+
+__u32 server_id;
+
+struct hdr_opt_state {
+ __u32 server_id;
+ __u8 byte_offset;
+ __u8 hdr_bytes_remaining;
+};
+
+static int parse_hdr_opt(const struct xdp_md *xdp, struct hdr_opt_state *state)
+{
+ const void *data = (void *)(long)xdp->data;
+ const void *data_end = (void *)(long)xdp->data_end;
+ __u8 *tcp_opt, kind, hdr_len;
+
+ tcp_opt = (__u8 *)(data + state->byte_offset);
+ if (tcp_opt + 1 > data_end)
+ return -1;
+
+ kind = tcp_opt[0];
+
+ if (kind == TCPOPT_EOL)
+ return -1;
+
+ if (kind == TCPOPT_NOP) {
+ state->hdr_bytes_remaining--;
+ state->byte_offset++;
+ return 0;
+ }
+
+ if (state->hdr_bytes_remaining < 2 ||
+ tcp_opt + sizeof(__u8) + sizeof(__u8) > data_end)
+ return -1;
+
+ hdr_len = tcp_opt[1];
+ if (hdr_len > state->hdr_bytes_remaining)
+ return -1;
+
+ if (kind == tcp_hdr_opt_kind_tpr) {
+ if (hdr_len != tcp_hdr_opt_len_tpr)
+ return -1;
+
+ if (tcp_opt + tcp_hdr_opt_len_tpr > data_end)
+ return -1;
+
+ state->server_id = *(__u32 *)&tcp_opt[2];
+ return 1;
+ }
+
+ state->hdr_bytes_remaining -= hdr_len;
+ state->byte_offset += hdr_len;
+ return 0;
+}
+
+SEC("xdp")
+int xdp_ingress_v6(struct xdp_md *xdp)
+{
+ const void *data = (void *)(long)xdp->data;
+ const void *data_end = (void *)(long)xdp->data_end;
+ struct hdr_opt_state opt_state = {};
+ __u8 tcp_hdr_opt_len = 0;
+ struct tcphdr *tcp_hdr;
+ __u64 tcp_offset = 0;
+ int err;
+
+ tcp_offset = sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
+ tcp_hdr = (struct tcphdr *)(data + tcp_offset);
+ if (tcp_hdr + 1 > data_end)
+ return XDP_DROP;
+
+ tcp_hdr_opt_len = (tcp_hdr->doff * 4) - sizeof(struct tcphdr);
+ if (tcp_hdr_opt_len < tcp_hdr_opt_len_tpr)
+ return XDP_DROP;
+
+ opt_state.hdr_bytes_remaining = tcp_hdr_opt_len;
+ opt_state.byte_offset = sizeof(struct tcphdr) + tcp_offset;
+
+ /* the TCP header can carry at most 40 bytes of options */
+ for (int i = 0; i < tcp_hdr_opt_max_opt_checks; i++) {
+ err = parse_hdr_opt(xdp, &opt_state);
+
+ if (err || !opt_state.hdr_bytes_remaining)
+ break;
+ }
+
+ if (!opt_state.server_id)
+ return XDP_DROP;
+
+ server_id = opt_state.server_id;
+
+ return XDP_PASS;
+}
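For reference, the TPR option that xdp_ingress_v6() scans for is a plain type-length-value entry. The struct below is only an illustrative sketch of the wire layout the parser expects (it is not part of the selftest); the constants mirror tcp_hdr_opt_kind_tpr and tcp_hdr_opt_len_tpr above.
/* Hypothetical view of the 6-byte TPR option matched by parse_hdr_opt(). */
struct tpr_opt_sketch {
	__u8 kind;		/* tcp_hdr_opt_kind_tpr == 0xFD */
	__u8 len;		/* tcp_hdr_opt_len_tpr == 6, covers the whole option */
	__u32 server_id;	/* read verbatim into opt_state.server_id */
} __attribute__((packed));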
diff --git a/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c
new file mode 100644
index 000000000000..dc6e43bc6a62
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* This logic is lifted from a real-world use case of packet parsing, used in
+ * the open source library katran, a layer 4 load balancer.
+ *
+ * This test demonstrates how to parse packet contents using dynptrs. The
+ * original code (parsing without dynptrs) can be found in test_parse_tcp_hdr_opt.c
+ */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/tcp.h>
+#include <stdbool.h>
+#include <linux/ipv6.h>
+#include <linux/if_ether.h>
+#include "test_tcp_hdr_options.h"
+#include "bpf_kfuncs.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Kind number used for experiments */
+const __u32 tcp_hdr_opt_kind_tpr = 0xFD;
+/* Length of the tcp header option */
+const __u32 tcp_hdr_opt_len_tpr = 6;
+/* maximum number of header options to check to look up server_id */
+const __u32 tcp_hdr_opt_max_opt_checks = 15;
+
+__u32 server_id;
+
+static int parse_hdr_opt(struct bpf_dynptr *ptr, __u32 *off, __u8 *hdr_bytes_remaining,
+ __u32 *server_id)
+{
+ __u8 kind, hdr_len;
+ __u8 buffer[sizeof(kind) + sizeof(hdr_len) + sizeof(*server_id)];
+ __u8 *data;
+
+ __builtin_memset(buffer, 0, sizeof(buffer));
+
+ data = bpf_dynptr_slice(ptr, *off, buffer, sizeof(buffer));
+ if (!data)
+ return -1;
+
+ kind = data[0];
+
+ if (kind == TCPOPT_EOL)
+ return -1;
+
+ if (kind == TCPOPT_NOP) {
+ *off += 1;
+ *hdr_bytes_remaining -= 1;
+ return 0;
+ }
+
+ if (*hdr_bytes_remaining < 2)
+ return -1;
+
+ hdr_len = data[1];
+ if (hdr_len > *hdr_bytes_remaining)
+ return -1;
+
+ if (kind == tcp_hdr_opt_kind_tpr) {
+ if (hdr_len != tcp_hdr_opt_len_tpr)
+ return -1;
+
+ __builtin_memcpy(server_id, (__u32 *)(data + 2), sizeof(*server_id));
+ return 1;
+ }
+
+ *off += hdr_len;
+ *hdr_bytes_remaining -= hdr_len;
+ return 0;
+}
+
+SEC("xdp")
+int xdp_ingress_v6(struct xdp_md *xdp)
+{
+ __u8 buffer[sizeof(struct tcphdr)] = {};
+ __u8 hdr_bytes_remaining;
+ struct tcphdr *tcp_hdr;
+ __u8 tcp_hdr_opt_len;
+ int err = 0;
+ __u32 off;
+
+ struct bpf_dynptr ptr;
+
+ bpf_dynptr_from_xdp(xdp, 0, &ptr);
+
+ off = sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
+
+ tcp_hdr = bpf_dynptr_slice(&ptr, off, buffer, sizeof(buffer));
+ if (!tcp_hdr)
+ return XDP_DROP;
+
+ tcp_hdr_opt_len = (tcp_hdr->doff * 4) - sizeof(struct tcphdr);
+ if (tcp_hdr_opt_len < tcp_hdr_opt_len_tpr)
+ return XDP_DROP;
+
+ hdr_bytes_remaining = tcp_hdr_opt_len;
+
+ off += sizeof(struct tcphdr);
+
+ /* the TCP header can carry at most 40 bytes of options */
+ for (int i = 0; i < tcp_hdr_opt_max_opt_checks; i++) {
+ err = parse_hdr_opt(&ptr, &off, &hdr_bytes_remaining, &server_id);
+
+ if (err || !hdr_bytes_remaining)
+ break;
+ }
+
+ if (!server_id)
+ return XDP_DROP;
+
+ return XDP_PASS;
+}
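Either XDP variant could, in principle, be exercised from user space with bpf_prog_test_run_opts(); the sketch below is an assumption for illustration (packet construction and skeleton loading are left out), not the actual prog_tests harness.
/* Hypothetical driver; assumes `pkt` already holds an Ethernet + IPv6 + TCP
 * frame that carries the 0xFD option with a non-zero server id.
 */
#include <bpf/bpf.h>

static int run_once(int prog_fd, void *pkt, size_t pkt_len)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt,
		.data_size_in = pkt_len,
	);
	int err = bpf_prog_test_run_opts(prog_fd, &topts);

	/* XDP_PASS is expected when the option was found */
	return err ?: topts.retval;
}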
diff --git a/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c b/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c
index fb22de7c365d..1249a945699f 100644
--- a/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c
+++ b/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c
@@ -7,15 +7,15 @@
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(max_entries, 1);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} array_1 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(max_entries, 1);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
__uint(map_flags, BPF_F_PRESERVE_ELEMS);
} array_2 SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
index 8207a2dc2f9d..17d5b67744d5 100644
--- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c
+++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
@@ -7,19 +7,35 @@
#include <bpf/bpf_tracing.h>
struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+} my_pid_map SEC(".maps");
+
+struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} perf_buf_map SEC(".maps");
SEC("tp/raw_syscalls/sys_enter")
int handle_sys_enter(void *ctx)
{
+ int zero = 0, *my_pid, cur_pid;
int cpu = bpf_get_smp_processor_id();
+ my_pid = bpf_map_lookup_elem(&my_pid_map, &zero);
+ if (!my_pid)
+ return 1;
+
+ cur_pid = bpf_get_current_pid_tgid() >> 32;
+ if (cur_pid != *my_pid)
+ return 1;
+
bpf_perf_event_output(ctx, &perf_buf_map, BPF_F_CURRENT_CPU,
&cpu, sizeof(cpu));
- return 0;
+ return 1;
}
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_perf_link.c b/tools/testing/selftests/bpf/progs/test_perf_link.c
new file mode 100644
index 000000000000..c1db9fd98d0c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_perf_link.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int run_cnt = 0;
+
+SEC("perf_event")
+int handler(struct pt_regs *ctx)
+{
+ __sync_fetch_and_add(&run_cnt, 1);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_perf_skip.c b/tools/testing/selftests/bpf/progs/test_perf_skip.c
new file mode 100644
index 000000000000..7eb8b6de7a57
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_perf_skip.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+uintptr_t ip;
+
+SEC("perf_event")
+int handler(struct bpf_perf_event_data *data)
+{
+ /* Skip events that have the correct ip. */
+ return ip != PT_REGS_IP(&data->regs);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_pinning.c b/tools/testing/selftests/bpf/progs/test_pinning.c
index 4ef2630292b2..0facea6cbbae 100644
--- a/tools/testing/selftests/bpf/progs/test_pinning.c
+++ b/tools/testing/selftests/bpf/progs/test_pinning.c
@@ -3,8 +3,6 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-int _version SEC("version") = 1;
-
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
diff --git a/tools/testing/selftests/bpf/progs/test_pinning_invalid.c b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c
index 5412e0c732c7..2a56db1094b8 100644
--- a/tools/testing/selftests/bpf/progs/test_pinning_invalid.c
+++ b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c
@@ -3,8 +3,6 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-int _version SEC("version") = 1;
-
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c
index 852051064507..bce7173152c6 100644
--- a/tools/testing/selftests/bpf/progs/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c
@@ -13,9 +13,7 @@
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-
-#define barrier() __asm__ __volatile__("": : :"memory")
-int _version SEC("version") = 1;
+#include "bpf_misc.h"
/* llvm will optimize both subprograms into exactly the same BPF assembly
*
@@ -54,6 +52,8 @@ int get_skb_len(struct __sk_buff *skb)
{
volatile char buf[MAX_STACK] = {};
+ __sink(buf[MAX_STACK - 1]);
+
return skb->len;
}
@@ -76,6 +76,8 @@ int get_skb_ifindex(int val, struct __sk_buff *skb, int var)
{
volatile char buf[MAX_STACK] = {};
+ __sink(buf[MAX_STACK - 1]);
+
return skb->ifindex * val * var;
}
@@ -97,7 +99,7 @@ int test_pkt_write_access_subprog(struct __sk_buff *skb, __u32 off)
return 0;
}
-SEC("classifier/test_pkt_access")
+SEC("tc")
int test_pkt_access(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_pkt_md_access.c b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
index 610c74ea9f64..d1839366f3e1 100644
--- a/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
@@ -7,8 +7,6 @@
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
-int _version SEC("version") = 1;
-
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define TEST_FIELD(TYPE, FIELD, MASK) \
{ \
@@ -27,7 +25,7 @@ int _version SEC("version") = 1;
}
#endif
-SEC("classifier/test_pkt_md_access")
+SEC("tc")
int test_pkt_md_access(struct __sk_buff *skb)
{
TEST_FIELD(__u8, len, 0xFF);
diff --git a/tools/testing/selftests/bpf/progs/test_probe_read_user_str.c b/tools/testing/selftests/bpf/progs/test_probe_read_user_str.c
new file mode 100644
index 000000000000..3ae398b75dcd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_probe_read_user_str.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include <sys/types.h>
+
+pid_t pid = 0;
+long ret = 0;
+void *user_ptr = 0;
+char buf[256] = {};
+
+SEC("tracepoint/syscalls/sys_enter_nanosleep")
+int on_write(void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ ret = bpf_probe_read_user_str(buf, sizeof(buf), user_ptr);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_probe_user.c b/tools/testing/selftests/bpf/progs/test_probe_user.c
index 89b3532ccc75..a8e501af9604 100644
--- a/tools/testing/selftests/bpf/progs/test_probe_user.c
+++ b/tools/testing/selftests/bpf/progs/test_probe_user.c
@@ -1,26 +1,47 @@
// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/ptrace.h>
-#include <linux/bpf.h>
-
-#include <netinet/in.h>
-
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
static struct sockaddr_in old;
-SEC("kprobe/__sys_connect")
-int BPF_KPROBE(handle_sys_connect)
+static int handle_sys_connect_common(struct sockaddr_in *uservaddr)
{
- void *ptr = (void *)PT_REGS_PARM2(ctx);
struct sockaddr_in new;
- bpf_probe_read_user(&old, sizeof(old), ptr);
+ bpf_probe_read_user(&old, sizeof(old), uservaddr);
__builtin_memset(&new, 0xab, sizeof(new));
- bpf_probe_write_user(ptr, &new, sizeof(new));
+ bpf_probe_write_user(uservaddr, &new, sizeof(new));
+
+ return 0;
+}
+
+SEC("ksyscall/connect")
+int BPF_KSYSCALL(handle_sys_connect, int fd, struct sockaddr_in *uservaddr,
+ int addrlen)
+{
+ return handle_sys_connect_common(uservaddr);
+}
+
+#if defined(bpf_target_s390)
+#ifndef SYS_CONNECT
+#define SYS_CONNECT 3
+#endif
+
+SEC("ksyscall/socketcall")
+int BPF_KSYSCALL(handle_sys_socketcall, int call, unsigned long *args)
+{
+ if (call == SYS_CONNECT) {
+ struct sockaddr_in *uservaddr;
+
+ bpf_probe_read_user(&uservaddr, sizeof(uservaddr), &args[1]);
+ return handle_sys_connect_common(uservaddr);
+ }
return 0;
}
+#endif
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_prog_array_init.c b/tools/testing/selftests/bpf/progs/test_prog_array_init.c
new file mode 100644
index 000000000000..2cd138356126
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_prog_array_init.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Hengqi Chen */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+const volatile pid_t my_pid = 0;
+int value = 0;
+
+SEC("raw_tp/sys_enter")
+int tailcall_1(void *ctx)
+{
+ value = 42;
+ return 0;
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 2);
+ __uint(key_size, sizeof(__u32));
+ __array(values, int (void *));
+} prog_array_init SEC(".maps") = {
+ .values = {
+ [1] = (void *)&tailcall_1,
+ },
+};
+
+SEC("raw_tp/sys_enter")
+int entry(void *ctx)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != my_pid)
+ return 0;
+
+ bpf_tail_call(ctx, &prog_array_init, 1);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c b/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c
new file mode 100644
index 000000000000..89b0cd5a3e06
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+
+char tp_name[128];
+
+SEC("lsm.s/bpf")
+int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ switch (cmd) {
+ case BPF_RAW_TRACEPOINT_OPEN:
+ bpf_copy_from_user(tp_name, sizeof(tp_name) - 1,
+ (void *)attr->raw_tracepoint.name);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+SEC("raw_tracepoint")
+int BPF_PROG(raw_tp_run)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_queue_stack_map.h b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h
index 4dd9806ad73b..648e8cab7a23 100644
--- a/tools/testing/selftests/bpf/progs/test_queue_stack_map.h
+++ b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h
@@ -8,8 +8,6 @@
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
-int _version SEC("version") = 1;
-
struct {
__uint(type, MAP_TYPE);
__uint(max_entries, 32);
@@ -26,7 +24,7 @@ struct {
__uint(value_size, sizeof(__u32));
} map_out SEC(".maps");
-SEC("test")
+SEC("tc")
int _test(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
index ecbeea2df259..7035fb4d4165 100644
--- a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
+++ b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
@@ -4,8 +4,9 @@
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
-static volatile const struct {
+const struct {
unsigned a[4];
/*
* if the struct's size is multiple of 16, compiler will put it into
@@ -15,11 +16,11 @@ static volatile const struct {
char _y;
} rdonly_values = { .a = {2, 3, 4, 5} };
-static volatile struct {
+struct {
unsigned did_run;
unsigned iters;
unsigned sum;
-} res;
+} res = {};
SEC("raw_tracepoint/sys_enter:skip_loop")
int skip_loop(struct pt_regs *ctx)
@@ -64,7 +65,7 @@ int full_loop(struct pt_regs *ctx)
{
/* prevent compiler to optimize everything out */
unsigned * volatile p = (void *)&rdonly_values.a;
- int i = sizeof(rdonly_values.a) / sizeof(rdonly_values.a[0]);
+ int i = ARRAY_SIZE(rdonly_values.a);
unsigned iters = 0, sum = 0;
/* validate verifier can allow full loop as well */
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf.c b/tools/testing/selftests/bpf/progs/test_ringbuf.c
index 8ba9959b036b..501cefa97633 100644
--- a/tools/testing/selftests/bpf/progs/test_ringbuf.c
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf.c
@@ -3,6 +3,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
@@ -15,7 +16,6 @@ struct sample {
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
- __uint(max_entries, 1 << 12);
} ringbuf SEC(".maps");
/* inputs */
@@ -36,12 +36,11 @@ long prod_pos = 0;
/* inner state */
long seq = 0;
-SEC("tp/syscalls/sys_enter_getpgid")
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
int test_ringbuf(void *ctx)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
struct sample *sample;
- int zero = 0;
if (cur_pid != pid)
return 0;
@@ -49,7 +48,7 @@ int test_ringbuf(void *ctx)
sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);
if (!sample) {
__sync_fetch_and_add(&dropped, 1);
- return 1;
+ return 0;
}
sample->pid = pid;
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c b/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c
new file mode 100644
index 000000000000..21bb7da90ea5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct sample {
+ int pid;
+ int seq;
+ long value;
+ char comm[16];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+} ringbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1000);
+ __type(key, struct sample);
+ __type(value, int);
+} hash_map SEC(".maps");
+
+/* inputs */
+int pid = 0;
+
+/* inner state */
+long seq = 0;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int test_ringbuf_mem_map_key(void *ctx)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+ struct sample *sample, sample_copy;
+ int *lookup_val;
+
+ if (cur_pid != pid)
+ return 0;
+
+ sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);
+ if (!sample)
+ return 0;
+
+ sample->pid = pid;
+ bpf_get_current_comm(sample->comm, sizeof(sample->comm));
+ sample->seq = ++seq;
+ sample->value = 42;
+
+ /* test using 'sample' (PTR_TO_MEM | MEM_ALLOC) as map key arg
+ */
+ lookup_val = (int *)bpf_map_lookup_elem(&hash_map, sample);
+ __sink(lookup_val);
+
+ /* workaround - memcpy is necessary so that verifier doesn't
+ * complain with:
+ * verifier internal error: more than one arg with ref_obj_id R3
+ * when trying to do bpf_map_update_elem(&hash_map, sample, &sample->seq, BPF_ANY);
+ *
+ * Since bpf_map_lookup_elem above uses 'sample' as key, test using
+ * sample field as value below
+ */
+ __builtin_memcpy(&sample_copy, sample, sizeof(struct sample));
+ bpf_map_update_elem(&hash_map, &sample_copy, &sample->seq, BPF_ANY);
+
+ bpf_ringbuf_submit(sample, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c b/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
index edf3b6953533..9626baa6779c 100644
--- a/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
@@ -15,7 +15,8 @@ struct sample {
struct ringbuf_map {
__uint(type, BPF_MAP_TYPE_RINGBUF);
- __uint(max_entries, 1 << 12);
+ /* libbpf will adjust to a valid page size */
+ __uint(max_entries, 1000);
} ringbuf1 SEC(".maps"),
ringbuf2 SEC(".maps");
@@ -31,6 +32,17 @@ struct {
},
};
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __array(values, struct ringbuf_map);
+} ringbuf_hash SEC(".maps") = {
+ .values = {
+ [0] = &ringbuf1,
+ },
+};
+
/* inputs */
int pid = 0;
int target_ring = 0;
@@ -47,7 +59,6 @@ int test_ringbuf(void *ctx)
int cur_pid = bpf_get_current_pid_tgid() >> 32;
struct sample *sample;
void *rb;
- int zero = 0;
if (cur_pid != pid)
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_n.c b/tools/testing/selftests/bpf/progs/test_ringbuf_n.c
new file mode 100644
index 000000000000..8669eb42dbe0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_n.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Andrea Righi <andrea.righi@canonical.com>
+
+#include <linux/bpf.h>
+#include <sched.h>
+#include <unistd.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define TASK_COMM_LEN 16
+
+struct sample {
+ int pid;
+ long value;
+ char comm[16];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+} ringbuf SEC(".maps");
+
+int pid = 0;
+long value = 0;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int test_ringbuf_n(void *ctx)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+ struct sample *sample;
+
+ if (cur_pid != pid)
+ return 0;
+
+ sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);
+ if (!sample)
+ return 0;
+
+ sample->pid = pid;
+ sample->value = value;
+ bpf_get_current_comm(sample->comm, sizeof(sample->comm));
+
+ bpf_ringbuf_submit(sample, 0);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
new file mode 100644
index 000000000000..f063a0013f85
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+} ringbuf SEC(".maps");
+
+/* inputs */
+int pid = 0;
+
+/* outputs */
+long passed = 0;
+long discarded = 0;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int test_ringbuf_write(void *ctx)
+{
+ int *foo, cur_pid = bpf_get_current_pid_tgid() >> 32;
+ void *sample1, *sample2;
+
+ if (cur_pid != pid)
+ return 0;
+
+ sample1 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0);
+ if (!sample1)
+ return 0;
+ /* first one can pass */
+ sample2 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0);
+ if (!sample2) {
+ bpf_ringbuf_discard(sample1, 0);
+ __sync_fetch_and_add(&discarded, 1);
+ return 0;
+ }
+ /* second one must not */
+ __sync_fetch_and_add(&passed, 1);
+ foo = sample2 + 4084;
+ *foo = 256;
+ bpf_ringbuf_discard(sample1, 0);
+ bpf_ringbuf_discard(sample2, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_seg6_loop.c b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
index a7278f064368..5059050f74f6 100644
--- a/tools/testing/selftests/bpf/progs/test_seg6_loop.c
+++ b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
@@ -6,6 +6,8 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
+
/* Packet parsing state machine helpers. */
#define cursor_advance(_cursor, _len) \
({ void *_tmp = _cursor; _cursor += _len; _tmp; })
@@ -134,7 +136,7 @@ static __always_inline int is_valid_tlv_boundary(struct __sk_buff *skb,
// we can only go as far as ~10 TLVs due to the BPF max stack size
// workaround: define induction variable "i" as "long" instead
// of "int" to prevent alu32 sub-register spilling.
- #pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (long i = 0; i < 100; i++) {
struct sr6_tlv_t tlv;
diff --git a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
index 26e77dcc7e91..a5be3267dbb0 100644
--- a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */
-#include <stdlib.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -15,8 +14,6 @@
#include <bpf/bpf_helpers.h>
#include "test_select_reuseport_common.h"
-int _version SEC("version") = 1;
-
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
@@ -24,8 +21,8 @@ int _version SEC("version") = 1;
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(__u32));
+ __type(key, __u32);
+ __type(value, __u32);
} outer_map SEC(".maps");
struct {
@@ -66,7 +63,7 @@ SEC("sk_reuseport")
int _select_by_skb_data(struct sk_reuseport_md *reuse_md)
{
__u32 linum, index = 0, flags = 0, index_zero = 0;
- __u32 *result_cnt, *linum_value;
+ __u32 *result_cnt;
struct data_check data_check = {};
struct cmd *cmd, cmd_copy;
void *data, *data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c
index b4233d3efac2..176a355e3062 100644
--- a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c
@@ -1,27 +1,50 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
-#include <linux/bpf.h>
+#include <vmlinux.h>
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
-__u32 sig = 0, pid = 0, status = 0, signal_thread = 0;
+struct task_struct *bpf_task_from_pid(int pid) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type, u64 value) __ksym;
+
+__u32 sig = 0, pid = 0, status = 0, signal_thread = 0, target_pid = 0;
static __always_inline int bpf_send_signal_test(void *ctx)
{
+ struct task_struct *target_task = NULL;
int ret;
+ u64 value;
- if (status != 0 || sig == 0 || pid == 0)
+ if (status != 0 || pid == 0)
return 0;
if ((bpf_get_current_pid_tgid() >> 32) == pid) {
- if (signal_thread)
- ret = bpf_send_signal_thread(sig);
- else
- ret = bpf_send_signal(sig);
+ if (target_pid) {
+ target_task = bpf_task_from_pid(target_pid);
+ if (!target_task)
+ return 0;
+ value = 8;
+ }
+
+ if (signal_thread) {
+ if (target_pid)
+ ret = bpf_send_signal_task(target_task, sig, PIDTYPE_PID, value);
+ else
+ ret = bpf_send_signal_thread(sig);
+ } else {
+ if (target_pid)
+ ret = bpf_send_signal_task(target_task, sig, PIDTYPE_TGID, value);
+ else
+ ret = bpf_send_signal(sig);
+ }
if (ret == 0)
status = 1;
}
+ if (target_task)
+ bpf_task_release(target_task);
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_set_remove_xattr.c b/tools/testing/selftests/bpf/progs/test_set_remove_xattr.c
new file mode 100644
index 000000000000..6a612cf168d3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_set_remove_xattr.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__u32 monitored_pid;
+
+const char xattr_foo[] = "security.bpf.foo";
+const char xattr_bar[] = "security.bpf.bar";
+static const char xattr_selinux[] = "security.selinux";
+char value_bar[] = "world";
+char read_value[32];
+
+bool set_security_bpf_bar_success;
+bool remove_security_bpf_bar_success;
+bool set_security_selinux_fail;
+bool remove_security_selinux_fail;
+
+char name_buf[32];
+
+static inline bool name_match_foo(const char *name)
+{
+ bpf_probe_read_kernel(name_buf, sizeof(name_buf), name);
+
+ return !bpf_strncmp(name_buf, sizeof(xattr_foo), xattr_foo);
+}
+
+/* Test bpf_set_dentry_xattr and bpf_remove_dentry_xattr */
+SEC("lsm.s/inode_getxattr")
+int BPF_PROG(test_inode_getxattr, struct dentry *dentry, char *name)
+{
+ struct bpf_dynptr value_ptr;
+ __u32 pid;
+ int ret;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ /* Only do the following for security.bpf.foo */
+ if (!name_match_foo(name))
+ return 0;
+
+ bpf_dynptr_from_mem(read_value, sizeof(read_value), 0, &value_ptr);
+
+ /* read security.bpf.bar */
+ ret = bpf_get_dentry_xattr(dentry, xattr_bar, &value_ptr);
+
+ if (ret < 0) {
+ /* If security.bpf.bar doesn't exist, set it */
+ bpf_dynptr_from_mem(value_bar, sizeof(value_bar), 0, &value_ptr);
+
+ ret = bpf_set_dentry_xattr(dentry, xattr_bar, &value_ptr, 0);
+ if (!ret)
+ set_security_bpf_bar_success = true;
+ ret = bpf_set_dentry_xattr(dentry, xattr_selinux, &value_ptr, 0);
+ if (ret)
+ set_security_selinux_fail = true;
+ } else {
+ /* If security.bpf.bar exists, remove it */
+ ret = bpf_remove_dentry_xattr(dentry, xattr_bar);
+ if (!ret)
+ remove_security_bpf_bar_success = true;
+
+ ret = bpf_remove_dentry_xattr(dentry, xattr_selinux);
+ if (ret)
+ remove_security_selinux_fail = true;
+ }
+
+ return 0;
+}
+
+bool locked_set_security_bpf_bar_success;
+bool locked_remove_security_bpf_bar_success;
+bool locked_set_security_selinux_fail;
+bool locked_remove_security_selinux_fail;
+
+/* Test bpf_set_dentry_xattr_locked and bpf_remove_dentry_xattr_locked.
+ * It is not necessary to differentiate between the _locked and the
+ * non-_locked versions in the BPF program; the verifier will fix them up
+ * properly.
+ */
+SEC("lsm.s/inode_setxattr")
+int BPF_PROG(test_inode_setxattr, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct bpf_dynptr value_ptr;
+ __u32 pid;
+ int ret;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ /* Only do the following for security.bpf.foo */
+ if (!name_match_foo(name))
+ return 0;
+
+ bpf_dynptr_from_mem(read_value, sizeof(read_value), 0, &value_ptr);
+
+ /* read security.bpf.bar */
+ ret = bpf_get_dentry_xattr(dentry, xattr_bar, &value_ptr);
+
+ if (ret < 0) {
+ /* If security.bpf.bar doesn't exist, set it */
+ bpf_dynptr_from_mem(value_bar, sizeof(value_bar), 0, &value_ptr);
+
+ ret = bpf_set_dentry_xattr(dentry, xattr_bar, &value_ptr, 0);
+ if (!ret)
+ locked_set_security_bpf_bar_success = true;
+ ret = bpf_set_dentry_xattr(dentry, xattr_selinux, &value_ptr, 0);
+ if (ret)
+ locked_set_security_selinux_fail = true;
+ } else {
+ /* If security.bpf.bar exists, remove it */
+ ret = bpf_remove_dentry_xattr(dentry, xattr_bar);
+ if (!ret)
+ locked_remove_security_bpf_bar_success = true;
+
+ ret = bpf_remove_dentry_xattr(dentry, xattr_selinux);
+ if (ret)
+ locked_remove_security_selinux_fail = true;
+ }
+
+ return 0;
+}
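For context, one way user space might drive these two hooks (a hedged sketch, not the actual prog_tests code): reading security.bpf.foo enters inode_getxattr and setting it enters inode_setxattr, so alternating the calls flips between the "set" and "remove" branches above.
/* Hypothetical trigger; assumes `path` is on a filesystem that accepts
 * security.* xattrs and that errors can be ignored for brevity.
 */
#include <sys/xattr.h>

static void poke_xattr_hooks(const char *path)
{
	char buf[32];

	/* hits the inode_getxattr hook with name == "security.bpf.foo" */
	getxattr(path, "security.bpf.foo", buf, sizeof(buf));

	/* hits the inode_setxattr hook with the same name */
	setxattr(path, "security.bpf.foo", "hello", 5, 0);
}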
diff --git a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c
new file mode 100644
index 000000000000..34b30e2603f0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_kfuncs.h"
+#include "err.h"
+
+char _license[] SEC("license") = "GPL";
+
+#ifndef SHA256_DIGEST_SIZE
+#define SHA256_DIGEST_SIZE 32
+#endif
+
+#define MAX_SIG_SIZE 1024
+
+/* By default, "fsverity sign" signs a file with fsverity_formatted_digest
+ * of the file. fsverity_formatted_digest on the kernel side is only used
+ * with CONFIG_FS_VERITY_BUILTIN_SIGNATURES. However, BPF LSM doesn't
+ * require CONFIG_FS_VERITY_BUILTIN_SIGNATURES, so vmlinux.h may not have
+ * fsverity_formatted_digest. In this test, we intentionally avoid using
+ * fsverity_formatted_digest.
+ *
+ * Luckily, fsverity_formatted_digest is simply an 8-byte magic followed by
+ * fsverity_digest. We use a char array of size fsverity_formatted_digest
+ * plus SHA256_DIGEST_SIZE. The magic part of it is filled by user space,
+ * and the rest of it is filled by bpf_get_fsverity_digest.
+ *
+ * Note that, generating signatures based on fsverity_formatted_digest is
+ * the design choice of this selftest (and "fsverity sign"). With BPF
+ * LSM, we have the flexibility to generate signature based on other data
+ * sets, for example, fsverity_digest or only the digest[] part of it.
+ */
+#define MAGIC_SIZE 8
+#define SIZEOF_STRUCT_FSVERITY_DIGEST 4 /* sizeof(struct fsverity_digest) */
+char digest[MAGIC_SIZE + SIZEOF_STRUCT_FSVERITY_DIGEST + SHA256_DIGEST_SIZE];
+
+__u32 monitored_pid;
+char sig[MAX_SIG_SIZE];
+__u32 sig_size;
+__s32 user_keyring_serial;
+
+SEC("lsm.s/file_open")
+int BPF_PROG(test_file_open, struct file *f)
+{
+ struct bpf_dynptr digest_ptr, sig_ptr;
+ struct bpf_key *trusted_keyring;
+ __u32 pid;
+ int ret;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ /* digest_ptr points to fsverity_digest */
+ bpf_dynptr_from_mem(digest + MAGIC_SIZE, sizeof(digest) - MAGIC_SIZE, 0, &digest_ptr);
+
+ ret = bpf_get_fsverity_digest(f, &digest_ptr);
+ /* No verity, allow access */
+ if (ret < 0)
+ return 0;
+
+ /* Move digest_ptr to fsverity_formatted_digest */
+ bpf_dynptr_from_mem(digest, sizeof(digest), 0, &digest_ptr);
+
+ /* Read signature from xattr */
+ bpf_dynptr_from_mem(sig, sizeof(sig), 0, &sig_ptr);
+ ret = bpf_get_file_xattr(f, "user.sig", &sig_ptr);
+ /* No signature, reject access */
+ if (ret < 0)
+ return -EPERM;
+
+ trusted_keyring = bpf_lookup_user_key(user_keyring_serial, 0);
+ if (!trusted_keyring)
+ return -ENOENT;
+
+ /* Verify signature */
+ ret = bpf_verify_pkcs7_signature(&digest_ptr, &sig_ptr, trusted_keyring);
+
+ bpf_key_put(trusted_keyring);
+
+ set_if_not_errno_or_zero(ret, -EFAULT);
+
+ return ret;
+}
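As a rough illustration of how the digest[] buffer above is laid out (inferred from the comment, not something the selftest defines): user space writes the 8-byte magic and bpf_get_fsverity_digest() fills in the rest.
/* Conceptual layout only; field names are illustrative. */
struct formatted_digest_sketch {
	char magic[MAGIC_SIZE];			/* filled by user space */
	__u16 digest_algorithm;			/* struct fsverity_digest header, */
	__u16 digest_size;			/* written by bpf_get_fsverity_digest() */
	__u8 digest[SHA256_DIGEST_SIZE];	/* the SHA-256 digest itself */
} __attribute__((packed));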
diff --git a/tools/testing/selftests/bpf/progs/test_siphash.h b/tools/testing/selftests/bpf/progs/test_siphash.h
new file mode 100644
index 000000000000..5d3a7ec36780
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_siphash.h
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#ifndef _TEST_SIPHASH_H
+#define _TEST_SIPHASH_H
+
+/* include/linux/bitops.h */
+static inline u64 rol64(u64 word, unsigned int shift)
+{
+ return (word << (shift & 63)) | (word >> ((-shift) & 63));
+}
+
+/* include/linux/siphash.h */
+#define SIPHASH_PERMUTATION(a, b, c, d) ( \
+ (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
+ (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
+ (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
+ (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))
+
+#define SIPHASH_CONST_0 0x736f6d6570736575ULL
+#define SIPHASH_CONST_1 0x646f72616e646f6dULL
+#define SIPHASH_CONST_2 0x6c7967656e657261ULL
+#define SIPHASH_CONST_3 0x7465646279746573ULL
+
+/* lib/siphash.c */
+#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
+
+#define PREAMBLE(len) \
+ u64 v0 = SIPHASH_CONST_0; \
+ u64 v1 = SIPHASH_CONST_1; \
+ u64 v2 = SIPHASH_CONST_2; \
+ u64 v3 = SIPHASH_CONST_3; \
+ u64 b = ((u64)(len)) << 56; \
+ v3 ^= key->key[1]; \
+ v2 ^= key->key[0]; \
+ v1 ^= key->key[1]; \
+ v0 ^= key->key[0];
+
+#define POSTAMBLE \
+ v3 ^= b; \
+ SIPROUND; \
+ SIPROUND; \
+ v0 ^= b; \
+ v2 ^= 0xff; \
+ SIPROUND; \
+ SIPROUND; \
+ SIPROUND; \
+ SIPROUND; \
+ return (v0 ^ v1) ^ (v2 ^ v3);
+
+static inline u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key)
+{
+ PREAMBLE(16)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= second;
+ POSTAMBLE
+}
+#endif
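A minimal usage sketch for the helper above, assuming siphash_key_t is the kernel's { u64 key[2]; } type pulled in via vmlinux.h; the key value is arbitrary.
static inline u64 siphash_example(u64 saddr, u64 daddr)
{
	const siphash_key_t key = {
		.key = { 0x0123456789abcdefULL, 0xfedcba9876543210ULL },
	};

	return siphash_2u64(saddr, daddr, &key);
}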
diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c
index 1ecd987005d2..3079244c7f96 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_assign.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c
@@ -15,7 +15,18 @@
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_misc.h"
+#if defined(IPROUTE2_HAVE_LIBBPF)
+/* Use a new-style map definition. */
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __type(key, int);
+ __type(value, __u64);
+ __uint(pinning, LIBBPF_PIN_BY_NAME);
+ __uint(max_entries, 1);
+} server_map SEC(".maps");
+#else
/* Pin map under /sys/fs/bpf/tc/globals/<map name> */
#define PIN_GLOBAL_NS 2
@@ -35,8 +46,8 @@ struct {
.max_elem = 1,
.pinning = PIN_GLOBAL_NS,
};
+#endif
-int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
/* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
@@ -47,7 +58,6 @@ get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp)
void *data = (void *)(long)skb->data;
struct bpf_sock_tuple *result;
struct ethhdr *eth;
- __u64 tuple_len;
__u8 proto = 0;
__u64 ihl_len;
@@ -84,13 +94,13 @@ get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp)
return NULL;
*tcp = (proto == IPPROTO_TCP);
+ __sink(ihl_len);
return result;
}
static inline int
handle_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
- struct bpf_sock_tuple ln = {0};
struct bpf_sock *sk;
const int zero = 0;
size_t tuple_len;
@@ -122,7 +132,6 @@ assign:
static inline int
handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
- struct bpf_sock_tuple ln = {0};
struct bpf_sock *sk;
const int zero = 0;
size_t tuple_len;
@@ -159,13 +168,12 @@ assign:
return ret;
}
-SEC("classifier/sk_assign_test")
+SEC("tc")
int bpf_sk_assign_test(struct __sk_buff *skb)
{
- struct bpf_sock_tuple *tuple, ln = {0};
+ struct bpf_sock_tuple *tuple;
bool ipv4 = false;
bool tcp = false;
- int tuple_len;
int ret = 0;
tuple = get_tuple(skb, &ipv4, &tcp);
diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c
new file mode 100644
index 000000000000..dcf46adfda04
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-2.0
+#define IPROUTE2_HAVE_LIBBPF
+#include "test_sk_assign.c"
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
index 1032b292af5b..71f844b9b902 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
@@ -64,36 +64,48 @@ static const int PROG_DONE = 1;
static const __u32 KEY_SERVER_A = SERVER_A;
static const __u32 KEY_SERVER_B = SERVER_B;
+static const __u16 SRC_PORT = bpf_htons(8008);
+static const __u32 SRC_IP4 = IP4(127, 0, 0, 2);
+static const __u32 SRC_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000002);
+
static const __u16 DST_PORT = 7007; /* Host byte order */
static const __u32 DST_IP4 = IP4(127, 0, 0, 1);
static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001);
-SEC("sk_lookup/lookup_pass")
+SEC("sk_lookup")
int lookup_pass(struct bpf_sk_lookup *ctx)
{
return SK_PASS;
}
-SEC("sk_lookup/lookup_drop")
+SEC("sk_lookup")
int lookup_drop(struct bpf_sk_lookup *ctx)
{
return SK_DROP;
}
-SEC("sk_reuseport/reuse_pass")
+SEC("sk_lookup")
+int check_ifindex(struct bpf_sk_lookup *ctx)
+{
+ if (ctx->ingress_ifindex == 1)
+ return SK_DROP;
+ return SK_PASS;
+}
+
+SEC("sk_reuseport")
int reuseport_pass(struct sk_reuseport_md *ctx)
{
return SK_PASS;
}
-SEC("sk_reuseport/reuse_drop")
+SEC("sk_reuseport")
int reuseport_drop(struct sk_reuseport_md *ctx)
{
return SK_DROP;
}
/* Redirect packets destined for port DST_PORT to socket at redir_map[0]. */
-SEC("sk_lookup/redir_port")
+SEC("sk_lookup")
int redir_port(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -112,7 +124,7 @@ int redir_port(struct bpf_sk_lookup *ctx)
}
/* Redirect packets destined for DST_IP4 address to socket at redir_map[0]. */
-SEC("sk_lookup/redir_ip4")
+SEC("sk_lookup")
int redir_ip4(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -135,7 +147,7 @@ int redir_ip4(struct bpf_sk_lookup *ctx)
}
/* Redirect packets destined for DST_IP6 address to socket at redir_map[0]. */
-SEC("sk_lookup/redir_ip6")
+SEC("sk_lookup")
int redir_ip6(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -160,7 +172,7 @@ int redir_ip6(struct bpf_sk_lookup *ctx)
return err ? SK_DROP : SK_PASS;
}
-SEC("sk_lookup/select_sock_a")
+SEC("sk_lookup")
int select_sock_a(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -175,7 +187,7 @@ int select_sock_a(struct bpf_sk_lookup *ctx)
return err ? SK_DROP : SK_PASS;
}
-SEC("sk_lookup/select_sock_a_no_reuseport")
+SEC("sk_lookup")
int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -190,7 +202,7 @@ int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx)
return err ? SK_DROP : SK_PASS;
}
-SEC("sk_reuseport/select_sock_b")
+SEC("sk_reuseport")
int select_sock_b(struct sk_reuseport_md *ctx)
{
__u32 key = KEY_SERVER_B;
@@ -201,7 +213,7 @@ int select_sock_b(struct sk_reuseport_md *ctx)
}
/* Check that bpf_sk_assign() returns -EEXIST if socket already selected. */
-SEC("sk_lookup/sk_assign_eexist")
+SEC("sk_lookup")
int sk_assign_eexist(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -234,7 +246,7 @@ out:
}
/* Check that bpf_sk_assign(BPF_SK_LOOKUP_F_REPLACE) can override selection. */
-SEC("sk_lookup/sk_assign_replace_flag")
+SEC("sk_lookup")
int sk_assign_replace_flag(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -266,7 +278,7 @@ out:
}
/* Check that bpf_sk_assign(sk=NULL) is accepted. */
-SEC("sk_lookup/sk_assign_null")
+SEC("sk_lookup")
int sk_assign_null(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk = NULL;
@@ -309,7 +321,7 @@ out:
}
/* Check that selected sk is accessible through context. */
-SEC("sk_lookup/access_ctx_sk")
+SEC("sk_lookup")
int access_ctx_sk(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk1 = NULL, *sk2 = NULL;
@@ -375,11 +387,11 @@ out:
* are not covered because they give bogus results, that is the
* verifier ignores the offset.
*/
-SEC("sk_lookup/ctx_narrow_access")
+SEC("sk_lookup")
int ctx_narrow_access(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
- int err, family;
+ __u32 val_u32;
bool v4;
v4 = (ctx->family == AF_INET);
@@ -398,11 +410,22 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
if (LSW(ctx->protocol, 0) != IPPROTO_TCP)
return SK_DROP;
- /* Narrow loads from remote_port field. Expect non-0 value. */
- if (LSB(ctx->remote_port, 0) == 0 && LSB(ctx->remote_port, 1) == 0 &&
- LSB(ctx->remote_port, 2) == 0 && LSB(ctx->remote_port, 3) == 0)
+ /* Narrow loads from remote_port field. Expect SRC_PORT. */
+ if (LSB(ctx->remote_port, 0) != ((SRC_PORT >> 0) & 0xff) ||
+ LSB(ctx->remote_port, 1) != ((SRC_PORT >> 8) & 0xff))
+ return SK_DROP;
+ if (LSW(ctx->remote_port, 0) != SRC_PORT)
return SK_DROP;
- if (LSW(ctx->remote_port, 0) == 0)
+
+ /*
+ * NOTE: 4-byte load from bpf_sk_lookup at remote_port offset
+ * is quirky. It gets rewritten by the access converter to a
+ * 2-byte load for backward compatibility. Treating the load
+ * result as a be16 value makes the code portable across
+ * little- and big-endian platforms.
+ */
+ val_u32 = *(__u32 *)&ctx->remote_port;
+ if (val_u32 != SRC_PORT)
return SK_DROP;
/* Narrow loads from local_port field. Expect DST_PORT. */
@@ -415,11 +438,14 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
/* Narrow loads from IPv4 fields */
if (v4) {
- /* Expect non-0.0.0.0 in remote_ip4 */
- if (LSB(ctx->remote_ip4, 0) == 0 && LSB(ctx->remote_ip4, 1) == 0 &&
- LSB(ctx->remote_ip4, 2) == 0 && LSB(ctx->remote_ip4, 3) == 0)
+ /* Expect SRC_IP4 in remote_ip4 */
+ if (LSB(ctx->remote_ip4, 0) != ((SRC_IP4 >> 0) & 0xff) ||
+ LSB(ctx->remote_ip4, 1) != ((SRC_IP4 >> 8) & 0xff) ||
+ LSB(ctx->remote_ip4, 2) != ((SRC_IP4 >> 16) & 0xff) ||
+ LSB(ctx->remote_ip4, 3) != ((SRC_IP4 >> 24) & 0xff))
return SK_DROP;
- if (LSW(ctx->remote_ip4, 0) == 0 && LSW(ctx->remote_ip4, 1) == 0)
+ if (LSW(ctx->remote_ip4, 0) != ((SRC_IP4 >> 0) & 0xffff) ||
+ LSW(ctx->remote_ip4, 1) != ((SRC_IP4 >> 16) & 0xffff))
return SK_DROP;
/* Expect DST_IP4 in local_ip4 */
@@ -448,20 +474,32 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
/* Narrow loads from IPv6 fields */
if (!v4) {
- /* Expect non-:: IP in remote_ip6 */
- if (LSB(ctx->remote_ip6[0], 0) == 0 && LSB(ctx->remote_ip6[0], 1) == 0 &&
- LSB(ctx->remote_ip6[0], 2) == 0 && LSB(ctx->remote_ip6[0], 3) == 0 &&
- LSB(ctx->remote_ip6[1], 0) == 0 && LSB(ctx->remote_ip6[1], 1) == 0 &&
- LSB(ctx->remote_ip6[1], 2) == 0 && LSB(ctx->remote_ip6[1], 3) == 0 &&
- LSB(ctx->remote_ip6[2], 0) == 0 && LSB(ctx->remote_ip6[2], 1) == 0 &&
- LSB(ctx->remote_ip6[2], 2) == 0 && LSB(ctx->remote_ip6[2], 3) == 0 &&
- LSB(ctx->remote_ip6[3], 0) == 0 && LSB(ctx->remote_ip6[3], 1) == 0 &&
- LSB(ctx->remote_ip6[3], 2) == 0 && LSB(ctx->remote_ip6[3], 3) == 0)
+ /* Expect SRC_IP6 in remote_ip6 */
+ if (LSB(ctx->remote_ip6[0], 0) != ((SRC_IP6[0] >> 0) & 0xff) ||
+ LSB(ctx->remote_ip6[0], 1) != ((SRC_IP6[0] >> 8) & 0xff) ||
+ LSB(ctx->remote_ip6[0], 2) != ((SRC_IP6[0] >> 16) & 0xff) ||
+ LSB(ctx->remote_ip6[0], 3) != ((SRC_IP6[0] >> 24) & 0xff) ||
+ LSB(ctx->remote_ip6[1], 0) != ((SRC_IP6[1] >> 0) & 0xff) ||
+ LSB(ctx->remote_ip6[1], 1) != ((SRC_IP6[1] >> 8) & 0xff) ||
+ LSB(ctx->remote_ip6[1], 2) != ((SRC_IP6[1] >> 16) & 0xff) ||
+ LSB(ctx->remote_ip6[1], 3) != ((SRC_IP6[1] >> 24) & 0xff) ||
+ LSB(ctx->remote_ip6[2], 0) != ((SRC_IP6[2] >> 0) & 0xff) ||
+ LSB(ctx->remote_ip6[2], 1) != ((SRC_IP6[2] >> 8) & 0xff) ||
+ LSB(ctx->remote_ip6[2], 2) != ((SRC_IP6[2] >> 16) & 0xff) ||
+ LSB(ctx->remote_ip6[2], 3) != ((SRC_IP6[2] >> 24) & 0xff) ||
+ LSB(ctx->remote_ip6[3], 0) != ((SRC_IP6[3] >> 0) & 0xff) ||
+ LSB(ctx->remote_ip6[3], 1) != ((SRC_IP6[3] >> 8) & 0xff) ||
+ LSB(ctx->remote_ip6[3], 2) != ((SRC_IP6[3] >> 16) & 0xff) ||
+ LSB(ctx->remote_ip6[3], 3) != ((SRC_IP6[3] >> 24) & 0xff))
return SK_DROP;
- if (LSW(ctx->remote_ip6[0], 0) == 0 && LSW(ctx->remote_ip6[0], 1) == 0 &&
- LSW(ctx->remote_ip6[1], 0) == 0 && LSW(ctx->remote_ip6[1], 1) == 0 &&
- LSW(ctx->remote_ip6[2], 0) == 0 && LSW(ctx->remote_ip6[2], 1) == 0 &&
- LSW(ctx->remote_ip6[3], 0) == 0 && LSW(ctx->remote_ip6[3], 1) == 0)
+ if (LSW(ctx->remote_ip6[0], 0) != ((SRC_IP6[0] >> 0) & 0xffff) ||
+ LSW(ctx->remote_ip6[0], 1) != ((SRC_IP6[0] >> 16) & 0xffff) ||
+ LSW(ctx->remote_ip6[1], 0) != ((SRC_IP6[1] >> 0) & 0xffff) ||
+ LSW(ctx->remote_ip6[1], 1) != ((SRC_IP6[1] >> 16) & 0xffff) ||
+ LSW(ctx->remote_ip6[2], 0) != ((SRC_IP6[2] >> 0) & 0xffff) ||
+ LSW(ctx->remote_ip6[2], 1) != ((SRC_IP6[2] >> 16) & 0xffff) ||
+ LSW(ctx->remote_ip6[3], 0) != ((SRC_IP6[3] >> 0) & 0xffff) ||
+ LSW(ctx->remote_ip6[3], 1) != ((SRC_IP6[3] >> 16) & 0xffff))
return SK_DROP;
/* Expect DST_IP6 in local_ip6 */
if (LSB(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xff) ||
@@ -533,7 +571,7 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
}
/* Check that sk_assign rejects SERVER_A socket with -ESOCKNOSUPPORT */
-SEC("sk_lookup/sk_assign_esocknosupport")
+SEC("sk_lookup")
int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -558,28 +596,28 @@ out:
return ret;
}
-SEC("sk_lookup/multi_prog_pass1")
+SEC("sk_lookup")
int multi_prog_pass1(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
-SEC("sk_lookup/multi_prog_pass2")
+SEC("sk_lookup")
int multi_prog_pass2(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
-SEC("sk_lookup/multi_prog_drop1")
+SEC("sk_lookup")
int multi_prog_drop1(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_DROP;
}
-SEC("sk_lookup/multi_prog_drop2")
+SEC("sk_lookup")
int multi_prog_drop2(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
@@ -603,25 +641,20 @@ static __always_inline int select_server_a(struct bpf_sk_lookup *ctx)
return SK_PASS;
}
-SEC("sk_lookup/multi_prog_redir1")
+SEC("sk_lookup")
int multi_prog_redir1(struct bpf_sk_lookup *ctx)
{
- int ret;
-
- ret = select_server_a(ctx);
+ (void)select_server_a(ctx);
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
-SEC("sk_lookup/multi_prog_redir2")
+SEC("sk_lookup")
int multi_prog_redir2(struct bpf_sk_lookup *ctx)
{
- int ret;
-
- ret = select_server_a(ctx);
+ (void)select_server_a(ctx);
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
char _license[] SEC("license") = "Dual BSD/GPL";
-__u32 _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
index e83d0b48d80c..e9efc3263022 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
@@ -15,7 +15,6 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
/* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
@@ -24,8 +23,8 @@ static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
bool *ipv4)
{
struct bpf_sock_tuple *result;
+ __u64 ihl_len = 0;
__u8 proto = 0;
- __u64 ihl_len;
if (eth_proto == bpf_htons(ETH_P_IP)) {
struct iphdr *iph = (struct iphdr *)(data + nh_off);
@@ -53,8 +52,8 @@ static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
return result;
}
-SEC("classifier/sk_lookup_success")
-int bpf_sk_lookup_test0(struct __sk_buff *skb)
+SEC("?tc")
+int sk_lookup_success(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
@@ -79,8 +78,8 @@ int bpf_sk_lookup_test0(struct __sk_buff *skb)
return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
}
-SEC("classifier/sk_lookup_success_simple")
-int bpf_sk_lookup_test1(struct __sk_buff *skb)
+SEC("?tc")
+int sk_lookup_success_simple(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
@@ -91,8 +90,8 @@ int bpf_sk_lookup_test1(struct __sk_buff *skb)
return 0;
}
-SEC("classifier/fail_use_after_free")
-int bpf_sk_lookup_uaf(struct __sk_buff *skb)
+SEC("?tc")
+int err_use_after_free(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
@@ -106,12 +105,11 @@ int bpf_sk_lookup_uaf(struct __sk_buff *skb)
return family;
}
-SEC("classifier/fail_modify_sk_pointer")
-int bpf_sk_lookup_modptr(struct __sk_buff *skb)
+SEC("?tc")
+int err_modify_sk_pointer(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
- __u32 family;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
if (sk) {
@@ -121,12 +119,11 @@ int bpf_sk_lookup_modptr(struct __sk_buff *skb)
return 0;
}
-SEC("classifier/fail_modify_sk_or_null_pointer")
-int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
+SEC("?tc")
+int err_modify_sk_or_null_pointer(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
- __u32 family;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
sk += 1;
@@ -135,8 +132,8 @@ int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
return 0;
}
-SEC("classifier/fail_no_release")
-int bpf_sk_lookup_test2(struct __sk_buff *skb)
+SEC("?tc")
+int err_no_release(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -144,8 +141,8 @@ int bpf_sk_lookup_test2(struct __sk_buff *skb)
return 0;
}
-SEC("classifier/fail_release_twice")
-int bpf_sk_lookup_test3(struct __sk_buff *skb)
+SEC("?tc")
+int err_release_twice(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
@@ -156,8 +153,8 @@ int bpf_sk_lookup_test3(struct __sk_buff *skb)
return 0;
}
-SEC("classifier/fail_release_unchecked")
-int bpf_sk_lookup_test4(struct __sk_buff *skb)
+SEC("?tc")
+int err_release_unchecked(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
@@ -173,8 +170,8 @@ void lookup_no_release(struct __sk_buff *skb)
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
}
-SEC("classifier/fail_no_release_subcall")
-int bpf_sk_lookup_test5(struct __sk_buff *skb)
+SEC("?tc")
+int err_no_release_subcall(struct __sk_buff *skb)
{
lookup_no_release(skb);
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_sk_storage_trace_itself.c b/tools/testing/selftests/bpf/progs/test_sk_storage_trace_itself.c
new file mode 100644
index 000000000000..59ef72d02a61
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sk_storage_trace_itself.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} sk_stg_map SEC(".maps");
+
+SEC("fentry/bpf_sk_storage_free")
+int BPF_PROG(trace_bpf_sk_storage_free, struct sock *sk)
+{
+ int *value;
+
+ value = bpf_sk_storage_get(&sk_stg_map, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+
+ if (value)
+ *value = 1;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
new file mode 100644
index 000000000000..40531e56776e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+
+struct sk_stg {
+ __u32 pid;
+ __u32 last_notclose_state;
+ char comm[16];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct sk_stg);
+} sk_stg_map SEC(".maps");
+
+/* Testing delete */
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} del_sk_stg_map SEC(".maps");
+
+char task_comm[16] = "";
+
+SEC("tp_btf/inet_sock_set_state")
+int BPF_PROG(trace_inet_sock_set_state, struct sock *sk, int oldstate,
+ int newstate)
+{
+ struct sk_stg *stg;
+
+ if (newstate == BPF_TCP_CLOSE)
+ return 0;
+
+ stg = bpf_sk_storage_get(&sk_stg_map, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!stg)
+ return 0;
+
+ stg->last_notclose_state = newstate;
+
+ bpf_sk_storage_delete(&del_sk_stg_map, sk);
+
+ return 0;
+}
+
+static void set_task_info(struct sock *sk)
+{
+ struct task_struct *task;
+ struct sk_stg *stg;
+
+ stg = bpf_sk_storage_get(&sk_stg_map, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!stg)
+ return;
+
+ stg->pid = bpf_get_current_pid_tgid();
+
+ task = (struct task_struct *)bpf_get_current_task();
+ bpf_core_read_str(&stg->comm, sizeof(stg->comm), &task->comm);
+ bpf_core_read_str(&task_comm, sizeof(task_comm), &task->comm);
+}
+
+SEC("fentry/inet_csk_listen_start")
+int BPF_PROG(trace_inet_csk_listen_start, struct sock *sk)
+{
+ set_task_info(sk);
+
+ return 0;
+}
+
+SEC("fentry/tcp_connect")
+int BPF_PROG(trace_tcp_connect, struct sock *sk)
+{
+ set_task_info(sk);
+
+ return 0;
+}
+
+SEC("fexit/inet_csk_accept")
+int BPF_PROG(inet_csk_accept, struct sock *sk, struct proto_accept_arg *arg,
+ struct sock *accepted_sk)
+{
+ set_task_info(accepted_sk);
+
+ return 0;
+}
+
+SEC("tp_btf/tcp_retransmit_synack")
+int BPF_PROG(tcp_retransmit_synack, struct sock* sk, struct request_sock* req)
+{
+ /* load only test */
+ bpf_sk_storage_get(&sk_stg_map, sk, 0, 0);
+ bpf_sk_storage_get(&sk_stg_map, req->sk, 0, 0);
+ return 0;
+}
+
+SEC("tp_btf/tcp_bad_csum")
+int BPF_PROG(tcp_bad_csum, struct sk_buff* skb)
+{
+ bpf_sk_storage_get(&sk_stg_map, skb->sk, 0, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
deleted file mode 100644
index 552f2090665c..000000000000
--- a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018 Facebook
-
-#include <linux/bpf.h>
-#include <linux/pkt_cls.h>
-
-#include <string.h>
-
-#include <bpf/bpf_helpers.h>
-
-#define NUM_CGROUP_LEVELS 4
-
-struct bpf_map_def SEC("maps") cgroup_ids = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u64),
- .max_entries = NUM_CGROUP_LEVELS,
-};
-
-static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level)
-{
- __u64 id;
-
- /* [1] &level passed to external function that may change it, it's
- * incompatible with loop unroll.
- */
- id = bpf_skb_ancestor_cgroup_id(skb, level);
- bpf_map_update_elem(&cgroup_ids, &level, &id, 0);
-}
-
-SEC("cgroup_id_logger")
-int log_cgroup_id(struct __sk_buff *skb)
-{
- /* Loop unroll can't be used here due to [1]. Unrolling manually.
- * Number of calls should be in sync with NUM_CGROUP_LEVELS.
- */
- log_nth_level(skb, 0);
- log_nth_level(skb, 1);
- log_nth_level(skb, 2);
- log_nth_level(skb, 3);
-
- return TC_ACT_OK;
-}
-
-int _version SEC("version") = 1;
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
index b02ea589ce7e..a724a70c6700 100644
--- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
@@ -3,13 +3,14 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-int _version SEC("version") = 1;
+#include "bpf_compiler.h"
+
char _license[] SEC("license") = "GPL";
-SEC("skb_ctx")
+SEC("tc")
int process(struct __sk_buff *skb)
{
- #pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < 5; i++) {
if (skb->cb[i] != i + 1)
return 1;
@@ -25,6 +26,12 @@ int process(struct __sk_buff *skb)
return 1;
if (skb->gso_size != 10)
return 1;
+ if (skb->ingress_ifindex != 11)
+ return 1;
+ if (skb->ifindex != 1)
+ return 1;
+ if (skb->hwtstamp != 11)
+ return 1;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_skb_helpers.c b/tools/testing/selftests/bpf/progs/test_skb_helpers.c
index bb3fbf1a29e3..507215791c5b 100644
--- a/tools/testing/selftests/bpf/progs/test_skb_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_helpers.c
@@ -14,7 +14,7 @@ struct {
char _license[] SEC("license") = "GPL";
-SEC("classifier/test_skb_helpers")
+SEC("tc")
int test_skb_helpers(struct __sk_buff *skb)
{
struct task_struct *task;
diff --git a/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c b/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c
new file mode 100644
index 000000000000..4cfa42aa9436
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Hengqi Chen */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_tracing_net.h"
+
+const volatile pid_t my_pid = 0;
+char path[256] = {};
+
+SEC("fentry/unix_listen")
+int BPF_PROG(unix_listen, struct socket *sock, int backlog)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+ struct unix_sock *unix_sk;
+ int i, len;
+
+ if (pid != my_pid)
+ return 0;
+
+ unix_sk = (struct unix_sock *)bpf_skc_to_unix_sock(sock->sk);
+ if (!unix_sk)
+ return 0;
+
+ if (unix_sk->addr->name->sun_path[0])
+ return 0;
+
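+	/* Abstract sockets have a leading NUL in sun_path; render the name
+	 * with a leading '@', matching the conventional /proc/net/unix notation.
+	 */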
+ len = unix_sk->addr->len - sizeof(short);
+ path[0] = '@';
+ for (i = 1; i < len; i++) {
+ if (i >= (int)sizeof(struct sockaddr_un))
+ break;
+
+ path[i] = unix_sk->addr->name->sun_path[i];
+ }
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c
index 374ccef704e1..adece9f91f58 100644
--- a/tools/testing/selftests/bpf/progs/test_skeleton.c
+++ b/tools/testing/selftests/bpf/progs/test_skeleton.c
@@ -5,6 +5,8 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#define __read_mostly SEC(".data.read_mostly")
+
struct s {
int a;
long long b;
@@ -38,10 +40,37 @@ extern int LINUX_KERNEL_VERSION __kconfig;
bool bpf_syscall = 0;
int kern_ver = 0;
+struct s out5 = {};
+
+
+const volatile int in_dynarr_sz SEC(".rodata.dyn");
+const volatile int in_dynarr[4] SEC(".rodata.dyn") = { -1, -2, -3, -4 };
+
+int out_dynarr[4] SEC(".data.dyn") = { 1, 2, 3, 4 };
+
+int read_mostly_var __read_mostly;
+int out_mostly_var;
+
+char huge_arr[16 * 1024 * 1024];
+
+/* non-mmapable custom .data section */
+
+struct my_value { int x, y, z; };
+
+__hidden int zero_key SEC(".data.non_mmapable");
+static struct my_value zero_value SEC(".data.non_mmapable");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct my_value);
+ __uint(max_entries, 1);
+} my_map SEC(".maps");
+
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
- static volatile struct s out5;
+ int i;
out1 = in1;
out2 = in2;
@@ -53,6 +82,16 @@ int handler(const void *ctx)
bpf_syscall = CONFIG_BPF_SYSCALL;
kern_ver = LINUX_KERNEL_VERSION;
+ for (i = 0; i < in_dynarr_sz; i++)
+ out_dynarr[i] = in_dynarr[i];
+
+ out_mostly_var = read_mostly_var;
+
+ huge_arr[sizeof(huge_arr) - 1] = 123;
+
+ /* make sure zero_key and zero_value are not optimized out */
+ bpf_map_update_elem(&my_map, &zero_key, &zero_value, BPF_ANY);
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_skmsg_load_helpers.c b/tools/testing/selftests/bpf/progs/test_skmsg_load_helpers.c
index 45e8fc75a739..996b177324ba 100644
--- a/tools/testing/selftests/bpf/progs/test_skmsg_load_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_skmsg_load_helpers.c
@@ -24,8 +24,7 @@ struct {
__type(value, __u64);
} socket_storage SEC(".maps");
-SEC("sk_msg")
-int prog_msg_verdict(struct sk_msg_md *msg)
+static int prog_msg_verdict_common(struct sk_msg_md *msg)
{
struct task_struct *task = (struct task_struct *)bpf_get_current_task();
int verdict = SK_PASS;
@@ -44,4 +43,28 @@ int prog_msg_verdict(struct sk_msg_md *msg)
return verdict;
}
+SEC("sk_msg")
+int prog_msg_verdict(struct sk_msg_md *msg)
+{
+ return prog_msg_verdict_common(msg);
+}
+
+SEC("sk_msg")
+int prog_msg_verdict_clone(struct sk_msg_md *msg)
+{
+ return prog_msg_verdict_common(msg);
+}
+
+SEC("sk_msg")
+int prog_msg_verdict_clone2(struct sk_msg_md *msg)
+{
+ return prog_msg_verdict_common(msg);
+}
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ return SK_PASS;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_snprintf.c b/tools/testing/selftests/bpf/progs/test_snprintf.c
new file mode 100644
index 000000000000..8fda07544023
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_snprintf.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google LLC. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+__u32 pid = 0;
+
+char num_out[64] = {};
+long num_ret = 0;
+
+char ip_out[64] = {};
+long ip_ret = 0;
+
+char sym_out[64] = {};
+long sym_ret = 0;
+
+char addr_out[64] = {};
+long addr_ret = 0;
+
+char str_out[64] = {};
+long str_ret = 0;
+
+char over_out[6] = {};
+long over_ret = 0;
+
+char pad_out[10] = {};
+long pad_ret = 0;
+
+char noarg_out[64] = {};
+long noarg_ret = 0;
+
+long nobuf_ret = 0;
+
+extern const void schedule __ksym;
+
+SEC("raw_tp/sys_enter")
+int handler(const void *ctx)
+{
+ /* Convenient values to pretty-print */
+ const __u8 ex_ipv4[] = {127, 0, 0, 1};
+ const __u8 ex_ipv6[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1};
+ static const char str1[] = "str1";
+ static const char longstr[] = "longstr";
+
+ if ((int)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+ /* Integer types */
+ num_ret = BPF_SNPRINTF(num_out, sizeof(num_out),
+ "%d %u %x %li %llu %lX",
+ -8, 9, 150, -424242, 1337, 0xDABBAD00);
+ /* IP addresses */
+ ip_ret = BPF_SNPRINTF(ip_out, sizeof(ip_out), "%pi4 %pI6",
+ &ex_ipv4, &ex_ipv6);
+ /* Symbol lookup formatting */
+ sym_ret = BPF_SNPRINTF(sym_out, sizeof(sym_out), "%ps %pS %pB",
+ &schedule, &schedule, &schedule);
+ /* Kernel pointers */
+ addr_ret = BPF_SNPRINTF(addr_out, sizeof(addr_out), "%pK %px %p",
+ 0, 0xFFFF00000ADD4E55, 0xFFFF00000ADD4E55);
+ /* Strings and single-byte character embedding */
+ str_ret = BPF_SNPRINTF(str_out, sizeof(str_out), "%s % 9c %+2c %-3c %04c %0c %+05s",
+ str1, 'a', 'b', 'c', 'd', 'e', longstr);
+ /* Overflow */
+ over_ret = BPF_SNPRINTF(over_out, sizeof(over_out), "%%overflow");
+ /* Padding of fixed width numbers */
+ pad_ret = BPF_SNPRINTF(pad_out, sizeof(pad_out), "%5d %0900000X", 4, 4);
+ /* No args */
+ noarg_ret = BPF_SNPRINTF(noarg_out, sizeof(noarg_out), "simple case");
+ /* No buffer */
+ nobuf_ret = BPF_SNPRINTF(NULL, 0, "only interested in length %d", 60);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_snprintf_single.c b/tools/testing/selftests/bpf/progs/test_snprintf_single.c
new file mode 100644
index 000000000000..3095837334d3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_snprintf_single.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google LLC. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* The format string is filled from userspace such that loading fails */
+const char fmt[10];
+
+SEC("raw_tp/sys_enter")
+int handler(const void *ctx)
+{
+ unsigned long long arg = 42;
+
+ bpf_snprintf(NULL, 0, fmt, &arg, sizeof(arg));
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c
index 81b57b9aaaea..196844be349c 100644
--- a/tools/testing/selftests/bpf/progs/test_sock_fields.c
+++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c
@@ -7,11 +7,11 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-#include "bpf_tcp_helpers.h"
enum bpf_linum_array_idx {
EGRESS_LINUM_IDX,
INGRESS_LINUM_IDX,
+ READ_SK_DST_PORT_LINUM_IDX,
__NR_BPF_LINUM_ARRAY_IDX,
};
@@ -41,6 +41,10 @@ struct {
__type(value, struct bpf_spinlock_cnt);
} sk_pkt_out_cnt10 SEC(".maps");
+struct tcp_sock {
+ __u32 lsndtime;
+} __attribute__((preserve_access_index));
+
struct bpf_tcp_sock listen_tp = {};
struct sockaddr_in6 srv_sa6 = {};
struct bpf_tcp_sock cli_tp = {};
@@ -113,14 +117,14 @@ static void tpcpy(struct bpf_tcp_sock *dst,
#define RET_LOG() ({ \
linum = __LINE__; \
- bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_NOEXIST); \
+ bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_ANY); \
return CG_OK; \
})
SEC("cgroup_skb/egress")
int egress_read_sock_fields(struct __sk_buff *skb)
{
- struct bpf_spinlock_cnt cli_cnt_init = { .lock = 0, .cnt = 0xeB9F };
+ struct bpf_spinlock_cnt cli_cnt_init = { .lock = {}, .cnt = 0xeB9F };
struct bpf_spinlock_cnt *pkt_out_cnt, *pkt_out_cnt10;
struct bpf_tcp_sock *tp, *tp_ret;
struct bpf_sock *sk, *sk_ret;
@@ -133,11 +137,11 @@ int egress_read_sock_fields(struct __sk_buff *skb)
if (!sk)
RET_LOG();
- /* Not the testing egress traffic or
- * TCP_LISTEN (10) socket will be copied at the ingress side.
+	/* Skip packets that are not the test's egress traffic, and skip the
+	 * listening socket, which is covered by the cgroup_skb/ingress program.
*/
if (sk->family != AF_INET6 || !is_loopback6(sk->src_ip6) ||
- sk->state == 10)
+ sk->state == BPF_TCP_LISTEN)
return CG_OK;
if (sk->src_port == bpf_ntohs(srv_sa6.sin6_port)) {
@@ -231,8 +235,8 @@ int ingress_read_sock_fields(struct __sk_buff *skb)
sk->src_port != bpf_ntohs(srv_sa6.sin6_port))
return CG_OK;
- /* Only interested in TCP_LISTEN */
- if (sk->state != 10)
+ /* Only interested in the listening socket */
+ if (sk->state != BPF_TCP_LISTEN)
return CG_OK;
/* It must be a fullsock for cgroup_skb/ingress prog */
@@ -250,4 +254,57 @@ int ingress_read_sock_fields(struct __sk_buff *skb)
return CG_OK;
}
+/*
+ * NOTE: 4-byte load from bpf_sock at dst_port offset is quirky. It
+ * gets rewritten by the access converter to a 2-byte load for
+ * backward compatibility. Treating the load result as a be16 value
+ * makes the code portable across little- and big-endian platforms.
+ */
+static __noinline bool sk_dst_port__load_word(struct bpf_sock *sk)
+{
+ __u32 *word = (__u32 *)&sk->dst_port;
+ return word[0] == bpf_htons(0xcafe);
+}
+
+static __noinline bool sk_dst_port__load_half(struct bpf_sock *sk)
+{
+ __u16 *half;
+
+ asm volatile ("");
+ half = (__u16 *)&sk->dst_port;
+ return half[0] == bpf_htons(0xcafe);
+}
+
+static __noinline bool sk_dst_port__load_byte(struct bpf_sock *sk)
+{
+ __u8 *byte = (__u8 *)&sk->dst_port;
+ return byte[0] == 0xca && byte[1] == 0xfe;
+}
+
+SEC("cgroup_skb/egress")
+int read_sk_dst_port(struct __sk_buff *skb)
+{
+ __u32 linum, linum_idx;
+ struct bpf_sock *sk;
+
+ linum_idx = READ_SK_DST_PORT_LINUM_IDX;
+
+ sk = skb->sk;
+ if (!sk)
+ RET_LOG();
+
+ /* Ignore everything but the SYN from the client socket */
+ if (sk->state != BPF_TCP_SYN_SENT)
+ return CG_OK;
+
+ if (!sk_dst_port__load_word(sk))
+ RET_LOG();
+ if (!sk_dst_port__load_half(sk))
+ RET_LOG();
+ if (!sk_dst_port__load_byte(sk))
+ RET_LOG();
+
+ return CG_OK;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c b/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c
new file mode 100644
index 000000000000..1c7941a4ad00
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 ByteDance */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE __PAGE_SIZE
+#endif
+#define BPF_SKB_MAX_LEN (PAGE_SIZE << 2)
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} sock_map_rx SEC(".maps");
+
+long change_tail_ret = 1;
+
+SEC("sk_skb")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ char *data, *data_end;
+
+ bpf_skb_pull_data(skb, 1);
+ data = (char *)(unsigned long)skb->data;
+ data_end = (char *)(unsigned long)skb->data_end;
+
+ if (data + 1 > data_end)
+ return SK_PASS;
+
+ if (data[0] == 'T') { /* Trim the packet */
+ change_tail_ret = bpf_skb_change_tail(skb, skb->len - 1, 0);
+ return SK_PASS;
+ } else if (data[0] == 'G') { /* Grow the packet */
+ change_tail_ret = bpf_skb_change_tail(skb, skb->len + 1, 0);
+ return SK_PASS;
+ } else if (data[0] == 'E') { /* Error */
+ change_tail_ret = bpf_skb_change_tail(skb, BPF_SKB_MAX_LEN, 0);
+ return SK_PASS;
+ }
+ return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c b/tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c
new file mode 100644
index 000000000000..29314805ce42
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c
@@ -0,0 +1,32 @@
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_rx SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_tx SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_msg SEC(".maps");
+
+SEC("sk_skb")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ return SK_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
index 1858435de7aa..f48f85f1bd70 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
@@ -14,6 +14,7 @@
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_misc.h"
/* Sockmap sample program connects a client and a backend together
* using cgroups.
@@ -91,7 +92,7 @@ struct {
__uint(value_size, sizeof(int));
} tls_sock_map SEC(".maps");
-SEC("sk_skb1")
+SEC("sk_skb/stream_parser")
int bpf_prog1(struct __sk_buff *skb)
{
int *f, two = 2;
@@ -103,7 +104,7 @@ int bpf_prog1(struct __sk_buff *skb)
return skb->len;
}
-SEC("sk_skb2")
+SEC("sk_skb/stream_verdict")
int bpf_prog2(struct __sk_buff *skb)
{
__u32 lport = skb->local_port;
@@ -111,12 +112,15 @@ int bpf_prog2(struct __sk_buff *skb)
int len, *f, ret, zero = 0;
__u64 flags = 0;
+ __sink(rport);
if (lport == 10000)
ret = 10;
else
ret = 1;
len = (__u32)skb->data_end - (__u32)skb->data;
+ __sink(len);
+
f = bpf_map_lookup_elem(&sock_skb_opts, &zero);
if (f && *f) {
ret = 3;
@@ -147,7 +151,7 @@ static inline void bpf_write_pass(struct __sk_buff *skb, int offset)
memcpy(c + offset, "PASS", 4);
}
-SEC("sk_skb3")
+SEC("sk_skb/stream_verdict")
int bpf_prog3(struct __sk_buff *skb)
{
int err, *f, ret = SK_PASS;
@@ -173,14 +177,10 @@ int bpf_prog3(struct __sk_buff *skb)
return bpf_sk_redirect_hash(skb, &tls_sock_map, &ret, flags);
#endif
}
- f = bpf_map_lookup_elem(&sock_skb_opts, &one);
- if (f && *f)
- ret = SK_DROP;
err = bpf_skb_adjust_room(skb, 4, 0, 0);
if (err)
return SK_DROP;
bpf_write_pass(skb, 13);
-tls_out:
return ret;
}
@@ -188,8 +188,7 @@ SEC("sockops")
int bpf_sockmap(struct bpf_sock_ops *skops)
{
__u32 lport, rport;
- int op, err = 0, index, key, ret;
-
+ int op, ret;
op = (int) skops->op;
@@ -201,10 +200,10 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
if (lport == 10000) {
ret = 1;
#ifdef SOCKMAP
- err = bpf_sock_map_update(skops, &sock_map, &ret,
+ bpf_sock_map_update(skops, &sock_map, &ret,
BPF_NOEXIST);
#else
- err = bpf_sock_hash_update(skops, &sock_map, &ret,
+ bpf_sock_hash_update(skops, &sock_map, &ret,
BPF_NOEXIST);
#endif
}
@@ -216,10 +215,10 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
if (bpf_ntohl(rport) == 10001) {
ret = 10;
#ifdef SOCKMAP
- err = bpf_sock_map_update(skops, &sock_map, &ret,
+ bpf_sock_map_update(skops, &sock_map, &ret,
BPF_NOEXIST);
#else
- err = bpf_sock_hash_update(skops, &sock_map, &ret,
+ bpf_sock_hash_update(skops, &sock_map, &ret,
BPF_NOEXIST);
#endif
}
@@ -231,11 +230,11 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
return 0;
}
-SEC("sk_msg1")
+SEC("sk_msg")
int bpf_prog4(struct sk_msg_md *msg)
{
int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
- int *start, *end, *start_push, *end_push, *start_pop, *pop;
+ int *start, *end, *start_push, *end_push, *start_pop, *pop, err = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
@@ -249,8 +248,11 @@ int bpf_prog4(struct sk_msg_md *msg)
bpf_msg_pull_data(msg, *start, *end, 0);
start_push = bpf_map_lookup_elem(&sock_bytes, &two);
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
- if (start_push && end_push)
- bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (start_push && end_push) {
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ return SK_DROP;
+ }
start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
pop = bpf_map_lookup_elem(&sock_bytes, &five);
if (start_pop && pop)
@@ -258,11 +260,12 @@ int bpf_prog4(struct sk_msg_md *msg)
return SK_PASS;
}
-SEC("sk_msg2")
+SEC("sk_msg")
int bpf_prog6(struct sk_msg_md *msg)
{
int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0;
int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f;
+ int err = 0;
__u64 flags = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
@@ -279,8 +282,11 @@ int bpf_prog6(struct sk_msg_md *msg)
start_push = bpf_map_lookup_elem(&sock_bytes, &two);
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
- if (start_push && end_push)
- bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (start_push && end_push) {
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ return SK_DROP;
+ }
start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
pop = bpf_map_lookup_elem(&sock_bytes, &five);
@@ -299,7 +305,7 @@ int bpf_prog6(struct sk_msg_md *msg)
#endif
}
-SEC("sk_msg3")
+SEC("sk_msg")
int bpf_prog8(struct sk_msg_md *msg)
{
void *data_end = (void *)(long) msg->data_end;
@@ -314,9 +320,14 @@ int bpf_prog8(struct sk_msg_md *msg)
} else {
return SK_DROP;
}
+
+ __sink(data_end);
+ __sink(data);
+
return SK_PASS;
}
-SEC("sk_msg4")
+
+SEC("sk_msg")
int bpf_prog9(struct sk_msg_md *msg)
{
void *data_end = (void *)(long) msg->data_end;
@@ -334,11 +345,11 @@ int bpf_prog9(struct sk_msg_md *msg)
return SK_PASS;
}
-SEC("sk_msg5")
+SEC("sk_msg")
int bpf_prog10(struct sk_msg_md *msg)
{
int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
- int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
+ int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, err = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
@@ -352,8 +363,11 @@ int bpf_prog10(struct sk_msg_md *msg)
bpf_msg_pull_data(msg, *start, *end, 0);
start_push = bpf_map_lookup_elem(&sock_bytes, &two);
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
- if (start_push && end_push)
- bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (start_push && end_push) {
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ return SK_PASS;
+ }
start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
pop = bpf_map_lookup_elem(&sock_bytes, &five);
if (start_pop && pop)
@@ -361,5 +375,4 @@ int bpf_prog10(struct sk_msg_md *msg)
return SK_DROP;
}
-int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c
new file mode 100644
index 000000000000..83df4919c224
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+int cork_byte;
+int push_start;
+int push_end;
+int apply_bytes;
+int pop_start;
+int pop_end;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map SEC(".maps");
+
+SEC("sk_msg")
+int prog_sk_policy(struct sk_msg_md *msg)
+{
+ if (cork_byte > 0)
+ bpf_msg_cork_bytes(msg, cork_byte);
+ if (push_start > 0 && push_end > 0)
+ bpf_msg_push_data(msg, push_start, push_end, 0);
+ if (pop_start >= 0 && pop_end > 0)
+ bpf_msg_pop_data(msg, pop_start, pop_end, 0);
+
+ return SK_PASS;
+}
+
+SEC("sk_msg")
+int prog_sk_policy_redir(struct sk_msg_md *msg)
+{
+ int two = 2;
+
+ bpf_msg_apply_bytes(msg, apply_bytes);
+ return bpf_msg_redirect_map(msg, &sock_map, two, 0);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
index a3a366c57ce1..b7250eb9c30c 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
@@ -15,6 +15,13 @@ struct {
} sock_map SEC(".maps");
struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u64);
+} nop_map SEC(".maps");
+
+struct {
__uint(type, BPF_MAP_TYPE_SOCKHASH);
__uint(max_entries, 2);
__type(key, __u32);
@@ -28,16 +35,31 @@ struct {
__type(value, unsigned int);
} verdict_map SEC(".maps");
-static volatile bool test_sockmap; /* toggled by user-space */
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} parser_map SEC(".maps");
+
+bool test_sockmap = false; /* toggled by user-space */
+bool test_ingress = false; /* toggled by user-space */
SEC("sk_skb/stream_parser")
-int prog_skb_parser(struct __sk_buff *skb)
+int prog_stream_parser(struct __sk_buff *skb)
{
+ int *value;
+ __u32 key = 0;
+
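+	/* user space can store a non-zero value in parser_map to force the
+	 * parser to return that value instead of skb->len
+	 */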
+ value = bpf_map_lookup_elem(&parser_map, &key);
+ if (value && *value)
+ return *value;
+
return skb->len;
}
SEC("sk_skb/stream_verdict")
-int prog_skb_verdict(struct __sk_buff *skb)
+int prog_stream_verdict(struct __sk_buff *skb)
{
unsigned int *count;
__u32 zero = 0;
@@ -55,6 +77,27 @@ int prog_skb_verdict(struct __sk_buff *skb)
return verdict;
}
+SEC("sk_skb")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ unsigned int *count;
+ __u32 zero = 0;
+ int verdict;
+
+ if (test_sockmap)
+ verdict = bpf_sk_redirect_map(skb, &sock_map, zero,
+ test_ingress ? BPF_F_INGRESS : 0);
+ else
+ verdict = bpf_sk_redirect_hash(skb, &sock_hash, &zero,
+ test_ingress ? BPF_F_INGRESS : 0);
+
+ count = bpf_map_lookup_elem(&verdict_map, &verdict);
+ if (count)
+ (*count)++;
+
+ return verdict;
+}
+
SEC("sk_msg")
int prog_msg_verdict(struct sk_msg_md *msg)
{
@@ -94,5 +137,4 @@ int prog_reuseport(struct sk_reuseport_md *reuse)
return verdict;
}
-int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c b/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c
new file mode 100644
index 000000000000..69aacc96db36
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c
@@ -0,0 +1,47 @@
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_rx SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_tx SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_msg SEC(".maps");
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ return SK_PASS;
+}
+
+int clone_called;
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_verdict_clone(struct __sk_buff *skb)
+{
+ clone_called = 1;
+ return SK_PASS;
+}
+
+SEC("sk_skb/stream_parser")
+int prog_skb_parser(struct __sk_buff *skb)
+{
+ return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_progs_query.c b/tools/testing/selftests/bpf/progs/test_sockmap_progs_query.c
new file mode 100644
index 000000000000..9d58d61c0dee
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_progs_query.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} sock_map SEC(".maps");
+
+SEC("sk_skb")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ return SK_PASS;
+}
+
+SEC("sk_msg")
+int prog_skmsg_verdict(struct sk_msg_md *msg)
+{
+ return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_redir.c b/tools/testing/selftests/bpf/progs/test_sockmap_redir.c
new file mode 100644
index 000000000000..34d9f4f2f0a2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_redir.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC(".maps") struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} nop_map, sock_map;
+
+SEC(".maps") struct {
+ __uint(type, BPF_MAP_TYPE_SOCKHASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} nop_hash, sock_hash;
+
+SEC(".maps") struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, int);
+ __type(value, unsigned int);
+} verdict_map;
+
+/* Set by user space */
+int redirect_type;
+int redirect_flags;
+
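+/* _Generic dispatches to the skb- or msg-flavored redirect helper based on
+ * the context type, so one DEFINE_PROG() body serves both program types.
+ */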
+#define redirect_map(__data) \
+ _Generic((__data), \
+ struct __sk_buff * : bpf_sk_redirect_map, \
+ struct sk_msg_md * : bpf_msg_redirect_map \
+ )((__data), &sock_map, (__u32){0}, redirect_flags)
+
+#define redirect_hash(__data) \
+ _Generic((__data), \
+ struct __sk_buff * : bpf_sk_redirect_hash, \
+ struct sk_msg_md * : bpf_msg_redirect_hash \
+ )((__data), &sock_hash, &(__u32){0}, redirect_flags)
+
+#define DEFINE_PROG(__type, __param) \
+SEC("sk_" XSTR(__type)) \
+int prog_ ## __type ## _verdict(__param data) \
+{ \
+ unsigned int *count; \
+ int verdict; \
+ \
+ if (redirect_type == BPF_MAP_TYPE_SOCKMAP) \
+ verdict = redirect_map(data); \
+ else if (redirect_type == BPF_MAP_TYPE_SOCKHASH) \
+ verdict = redirect_hash(data); \
+ else \
+ verdict = redirect_type - __MAX_BPF_MAP_TYPE; \
+ \
+ count = bpf_map_lookup_elem(&verdict_map, &verdict); \
+ if (count) \
+ (*count)++; \
+ \
+ return verdict; \
+}
+
+DEFINE_PROG(skb, struct __sk_buff *);
+DEFINE_PROG(msg, struct sk_msg_md *);
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c b/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c
new file mode 100644
index 000000000000..d25b0bb30fc0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u64);
+} sock_map SEC(".maps");
+
+SEC("sk_skb/verdict")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ return SK_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_strp.c b/tools/testing/selftests/bpf/progs/test_sockmap_strp.c
new file mode 100644
index 000000000000..dde3d5bec515
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_strp.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+int verdict_max_size = 10000;
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map SEC(".maps");
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ __u32 one = 1;
+
+ if (skb->len > verdict_max_size)
+ return SK_PASS;
+
+ return bpf_sk_redirect_map(skb, &sock_map, one, 0);
+}
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_verdict_pass(struct __sk_buff *skb)
+{
+ return SK_PASS;
+}
+
+SEC("sk_skb/stream_parser")
+int prog_skb_parser(struct __sk_buff *skb)
+{
+ return skb->len;
+}
+
+SEC("sk_skb/stream_parser")
+int prog_skb_parser_partial(struct __sk_buff *skb)
+{
+	/* By agreement with the test program, each message has a 4-byte size
+	 * header and a 6-byte body.
+ */
+ if (skb->len < 4) {
+		/* need more data to determine the full message length */
+ return 0;
+ }
+	/* Return the full message length decoded from the header.
+	 * The return value may be larger than skb->len, which means
+	 * the framework must wait for the rest of the body to arrive.
+ */
+ return 10;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_update.c b/tools/testing/selftests/bpf/progs/test_sockmap_update.c
index 9d0c9f28cab2..6d64ea536e3d 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_update.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_update.c
@@ -24,7 +24,7 @@ struct {
__type(value, __u64);
} dst_sock_hash SEC(".maps");
-SEC("classifier/copy_sock_map")
+SEC("tc")
int copy_sock_map(void *ctx)
{
struct bpf_sock *sk;
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c
index 0d31a3b3505f..d8d77bdffd3d 100644
--- a/tools/testing/selftests/bpf/progs/test_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c
@@ -3,6 +3,7 @@
#include <linux/bpf.h>
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
struct hmap_elem {
volatile int cnt;
@@ -45,8 +46,8 @@ struct {
#define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20)
-SEC("spin_lock_demo")
-int bpf_sping_lock_test(struct __sk_buff *skb)
+SEC("cgroup_skb/ingress")
+int bpf_spin_lock_test(struct __sk_buff *skb)
{
volatile int credit = 0, max_credit = 100, pkt_len = 64;
struct hmap_elem zero = {}, *val;
@@ -89,6 +90,8 @@ int bpf_sping_lock_test(struct __sk_buff *skb)
credit = q->credit;
bpf_spin_unlock(&q->lock);
+ __sink(credit);
+
/* spin_lock in cgroup local storage */
cls = bpf_get_local_storage(&cls_map, 0);
bpf_spin_lock(&cls->lock);
@@ -98,4 +101,69 @@ int bpf_sping_lock_test(struct __sk_buff *skb)
err:
return err;
}
+
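+/* The programs below check that spin lock state is tracked correctly across
+ * calls into static subprogs.
+ */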
+struct bpf_spin_lock lockA __hidden SEC(".data.A");
+
+__noinline
+static int static_subprog(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ if (ctx->protocol)
+ return ret;
+ return ret + ctx->len;
+}
+
+__noinline
+static int static_subprog_lock(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ ret = static_subprog(ctx);
+ bpf_spin_lock(&lockA);
+ return ret + ctx->len;
+}
+
+__noinline
+static int static_subprog_unlock(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ ret = static_subprog(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret + ctx->len;
+}
+
+SEC("tc")
+int lock_static_subprog_call(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = static_subprog(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("tc")
+int lock_static_subprog_lock(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ ret = static_subprog_lock(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("tc")
+int lock_static_subprog_unlock(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ ret = static_subprog_unlock(ctx);
+ return ret;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
new file mode 100644
index 000000000000..f678ee6bd7ea
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_experimental.h"
+
+struct foo {
+ struct bpf_spin_lock lock;
+ int data;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct foo);
+ __uint(max_entries, 1);
+} array_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct array_map);
+} map_of_maps SEC(".maps") = {
+ .values = {
+ [0] = &array_map,
+ },
+};
+
+static struct bpf_spin_lock lockA SEC(".data.A");
+static struct bpf_spin_lock lockB SEC(".data.B");
+
+SEC("?tc")
+int lock_id_kptr_preserve(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_this_cpu_ptr(f);
+ return 0;
+}
+
+SEC("?tc")
+int lock_id_global_zero(void *ctx)
+{
+ bpf_this_cpu_ptr(&lockA);
+ return 0;
+}
+
+SEC("?tc")
+int lock_id_mapval_preserve(void *ctx)
+{
+ struct foo *f;
+ int key = 0;
+
+ f = bpf_map_lookup_elem(&array_map, &key);
+ if (!f)
+ return 0;
+ bpf_this_cpu_ptr(f);
+ return 0;
+}
+
+SEC("?tc")
+int lock_id_innermapval_preserve(void *ctx)
+{
+ struct foo *f;
+ int key = 0;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &key);
+ if (!map)
+ return 0;
+ f = bpf_map_lookup_elem(map, &key);
+ if (!f)
+ return 0;
+ bpf_this_cpu_ptr(f);
+ return 0;
+}
+
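+/* Each CHECK() instance takes lock A but releases lock B; the verifier is
+ * expected to reject the lock/unlock identity mismatch.
+ */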
+#define CHECK(test, A, B) \
+ SEC("?tc") \
+ int lock_id_mismatch_##test(void *ctx) \
+ { \
+ struct foo *f1, *f2, *v, *iv; \
+ int key = 0; \
+ void *map; \
+ \
+ map = bpf_map_lookup_elem(&map_of_maps, &key); \
+ if (!map) \
+ return 0; \
+ iv = bpf_map_lookup_elem(map, &key); \
+ if (!iv) \
+ return 0; \
+ v = bpf_map_lookup_elem(&array_map, &key); \
+ if (!v) \
+ return 0; \
+ f1 = bpf_obj_new(typeof(*f1)); \
+ if (!f1) \
+ return 0; \
+ f2 = bpf_obj_new(typeof(*f2)); \
+ if (!f2) { \
+ bpf_obj_drop(f1); \
+ return 0; \
+ } \
+ bpf_spin_lock(A); \
+ bpf_spin_unlock(B); \
+ return 0; \
+ }
+
+CHECK(kptr_kptr, &f1->lock, &f2->lock);
+CHECK(kptr_global, &f1->lock, &lockA);
+CHECK(kptr_mapval, &f1->lock, &v->lock);
+CHECK(kptr_innermapval, &f1->lock, &iv->lock);
+
+CHECK(global_global, &lockA, &lockB);
+CHECK(global_kptr, &lockA, &f1->lock);
+CHECK(global_mapval, &lockA, &v->lock);
+CHECK(global_innermapval, &lockA, &iv->lock);
+
+SEC("?tc")
+int lock_id_mismatch_mapval_mapval(void *ctx)
+{
+ struct foo *f1, *f2;
+ int key = 0;
+
+ f1 = bpf_map_lookup_elem(&array_map, &key);
+ if (!f1)
+ return 0;
+ f2 = bpf_map_lookup_elem(&array_map, &key);
+ if (!f2)
+ return 0;
+
+ bpf_spin_lock(&f1->lock);
+ f1->data = 42;
+ bpf_spin_unlock(&f2->lock);
+
+ return 0;
+}
+
+CHECK(mapval_kptr, &v->lock, &f1->lock);
+CHECK(mapval_global, &v->lock, &lockB);
+CHECK(mapval_innermapval, &v->lock, &iv->lock);
+
+SEC("?tc")
+int lock_id_mismatch_innermapval_innermapval1(void *ctx)
+{
+ struct foo *f1, *f2;
+ int key = 0;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &key);
+ if (!map)
+ return 0;
+ f1 = bpf_map_lookup_elem(map, &key);
+ if (!f1)
+ return 0;
+ f2 = bpf_map_lookup_elem(map, &key);
+ if (!f2)
+ return 0;
+
+ bpf_spin_lock(&f1->lock);
+ f1->data = 42;
+ bpf_spin_unlock(&f2->lock);
+
+ return 0;
+}
+
+SEC("?tc")
+int lock_id_mismatch_innermapval_innermapval2(void *ctx)
+{
+ struct foo *f1, *f2;
+ int key = 0;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &key);
+ if (!map)
+ return 0;
+ f1 = bpf_map_lookup_elem(map, &key);
+ if (!f1)
+ return 0;
+ map = bpf_map_lookup_elem(&map_of_maps, &key);
+ if (!map)
+ return 0;
+ f2 = bpf_map_lookup_elem(map, &key);
+ if (!f2)
+ return 0;
+
+ bpf_spin_lock(&f1->lock);
+ f1->data = 42;
+ bpf_spin_unlock(&f2->lock);
+
+ return 0;
+}
+
+CHECK(innermapval_kptr, &iv->lock, &f1->lock);
+CHECK(innermapval_global, &iv->lock, &lockA);
+CHECK(innermapval_mapval, &iv->lock, &v->lock);
+
+#undef CHECK
+
+__noinline
+int global_subprog(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ if (ctx->protocol)
+ ret += ctx->protocol;
+ return ret + ctx->mark;
+}
+
+__noinline
+static int static_subprog_call_global(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ if (ctx->protocol)
+ return ret;
+ return ret + ctx->len + global_subprog(ctx);
+}
+
+SEC("?tc")
+int lock_global_subprog_call1(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = global_subprog(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("?tc")
+int lock_global_subprog_call2(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = static_subprog_call_global(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
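+/* Sleepable helpers and kfuncs must not be reachable while a spin lock is
+ * held; the lock_global_sleepable_* programs below are expected to fail
+ * verification.
+ */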
+int __noinline
+global_subprog_int(int i)
+{
+ if (i)
+ bpf_printk("%p", &i);
+ return i;
+}
+
+int __noinline
+global_sleepable_helper_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user(&i, sizeof(i), NULL);
+ return i;
+}
+
+int __noinline
+global_sleepable_kfunc_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user_str(&i, sizeof(i), NULL, 0);
+ global_subprog_int(i);
+ return i;
+}
+
+int __noinline
+global_subprog_calling_sleepable_global(int i)
+{
+ if (!i)
+ global_sleepable_kfunc_subprog(i);
+ return i;
+}
+
+SEC("?syscall")
+int lock_global_sleepable_helper_subprog(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = global_sleepable_helper_subprog(ctx->mark);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("?syscall")
+int lock_global_sleepable_kfunc_subprog(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = global_sleepable_kfunc_subprog(ctx->mark);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("?syscall")
+int lock_global_sleepable_subprog_indirect(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = global_subprog_calling_sleepable_global(ctx->mark);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_stack_var_off.c b/tools/testing/selftests/bpf/progs/test_stack_var_off.c
new file mode 100644
index 000000000000..665e6ae09d37
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_stack_var_off.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+int probe_res;
+
+char input[4] = {};
+int test_pid;
+
+SEC("tracepoint/syscalls/sys_enter_nanosleep")
+int probe(void *ctx)
+{
+ /* This BPF program performs variable-offset reads and writes on a
+ * stack-allocated buffer.
+ */
+ char stack_buf[16];
+ unsigned long len;
+ unsigned long last;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != test_pid)
+ return 0;
+
+ /* Copy the input to the stack. */
+ __builtin_memcpy(stack_buf, input, 4);
+
+ /* The first byte in the buffer indicates the length. */
+ len = stack_buf[0] & 0xf;
+ last = (len - 1) & 0xf;
+
+ /* Append something to the buffer. The offset where we write is not
+ * statically known; this is a variable-offset stack write.
+ */
+ stack_buf[len] = 42;
+
+ /* Index into the buffer at an unknown offset. This is a
+ * variable-offset stack read.
+ *
+ * Note that if it wasn't for the preceding variable-offset write, this
+ * read would be rejected because the stack slot cannot be verified as
+ * being initialized. With the preceding variable-offset write, the
+ * stack slot still cannot be verified, but the write inhibits the
+	 * respective check on the reasoning that, if there was a
+	 * variable-offset write to a higher-or-equal spot, we're probably reading
+ * what we just wrote.
+ */
+ probe_res = stack_buf[last];
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
index 0cf0134631b4..0c4426592a26 100644
--- a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
@@ -28,8 +28,8 @@ struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, 128);
__uint(map_flags, BPF_F_STACK_BUILD_ID);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(stack_trace_t));
+ __type(key, __u32);
+ __type(value, stack_trace_t);
} stackmap SEC(".maps");
struct {
@@ -39,16 +39,8 @@ struct {
__type(value, stack_trace_t);
} stack_amap SEC(".maps");
-/* taken from /sys/kernel/debug/tracing/events/random/urandom_read/format */
-struct random_urandom_args {
- unsigned long long pad;
- int got_bits;
- int pool_left;
- int input_left;
-};
-
-SEC("tracepoint/random/urandom_read")
-int oncpu(struct random_urandom_args *args)
+SEC("kprobe/urandom_read_iter")
+int oncpu(struct pt_regs *args)
{
__u32 max_len = sizeof(struct bpf_stack_build_id)
* PERF_MAX_STACK_DEPTH;
@@ -73,4 +65,3 @@ int oncpu(struct random_urandom_args *args)
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
index 00ed48672620..47568007b668 100644
--- a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
-#include <linux/bpf.h>
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#ifndef PERF_MAX_STACK_DEPTH
@@ -27,8 +27,8 @@ typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, 16384);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(stack_trace_t));
+ __type(key, __u32);
+ __type(value, stack_trace_t);
} stackmap SEC(".maps");
struct {
@@ -38,14 +38,14 @@ struct {
__type(value, stack_trace_t);
} stack_amap SEC(".maps");
-/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+/* taken from /sys/kernel/tracing/events/sched/sched_switch/format */
struct sched_switch_args {
unsigned long long pad;
- char prev_comm[16];
+ char prev_comm[TASK_COMM_LEN];
int prev_pid;
int prev_prio;
long long prev_state;
- char next_comm[16];
+ char next_comm[TASK_COMM_LEN];
int next_pid;
int next_prio;
};
diff --git a/tools/testing/selftests/bpf/progs/test_static_linked1.c b/tools/testing/selftests/bpf/progs/test_static_linked1.c
new file mode 100644
index 000000000000..4f0b612e1661
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_static_linked1.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* 8-byte aligned .data */
+static volatile long static_var1 = 2;
+static volatile int static_var2 = 3;
+int var1 = -1;
+/* 4-byte aligned .rodata */
+const volatile int rovar1;
+
+/* same "subprog" name in both files */
+static __noinline int subprog(int x)
+{
+ /* but different formula */
+ return x * 2;
+}
+
+SEC("raw_tp/sys_enter")
+int handler1(const void *ctx)
+{
+ var1 = subprog(rovar1) + static_var1 + static_var2;
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
+int VERSION SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_static_linked2.c b/tools/testing/selftests/bpf/progs/test_static_linked2.c
new file mode 100644
index 000000000000..766ebd502a60
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_static_linked2.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* 4-byte aligned .data */
+static volatile int static_var1 = 5;
+static volatile int static_var2 = 6;
+int var2 = -1;
+/* 8-byte aligned .rodata */
+const volatile long rovar2;
+
+/* same "subprog" name in both files */
+static __noinline int subprog(int x)
+{
+ /* but different formula */
+ return x * 3;
+}
+
+SEC("raw_tp/sys_enter")
+int handler2(const void *ctx)
+{
+ var2 = subprog(rovar2) + static_var1 + static_var2;
+
+ return 0;
+}
+
+/* a different name and/or type for these variables doesn't matter */
+char _license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_subprogs.c b/tools/testing/selftests/bpf/progs/test_subprogs.c
index d3c5673c0218..a8d602d7c88a 100644
--- a/tools/testing/selftests/bpf/progs/test_subprogs.c
+++ b/tools/testing/selftests/bpf/progs/test_subprogs.c
@@ -4,8 +4,18 @@
const char LICENSE[] SEC("license") = "GPL";
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} array SEC(".maps");
+
__noinline int sub1(int x)
{
+ int key = 0;
+
+ bpf_map_lookup_elem(&array, &key);
return x + 1;
}
@@ -23,6 +33,9 @@ static __noinline int sub3(int z)
static __noinline int sub4(int w)
{
+ int key = 0;
+
+ bpf_map_lookup_elem(&array, &key);
return w + sub3(5) + sub1(6);
}
@@ -34,7 +47,7 @@ static __noinline int sub5(int v)
return sub1(v) - 1; /* compensates sub1()'s + 1 */
}
-/* unfortunately verifier rejects `struct task_struct *t` as an unkown pointer
+/* unfortunately verifier rejects `struct task_struct *t` as an unknown pointer
* type, so we need to accept pointer as integer and then cast it inside the
* function
*/
@@ -76,6 +89,11 @@ int prog2(void *ctx)
return 0;
}
+static int empty_callback(__u32 index, void *data)
+{
+ return 0;
+}
+
/* prog3 has the same section name as prog1 */
SEC("raw_tp/sys_enter")
int prog3(void *ctx)
@@ -85,6 +103,9 @@ int prog3(void *ctx)
if (!BPF_CORE_READ(t, pid) || !get_task_tgid((uintptr_t)t))
return 1;
+ /* test that ld_imm64 with BPF_PSEUDO_FUNC doesn't get blinded */
+ bpf_loop(1, empty_callback, NULL, 0);
+
res3 = sub3(5) + 6; /* (5 + 3 + (4 + 1)) + 6 = 19 */
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_subprogs_extable.c b/tools/testing/selftests/bpf/progs/test_subprogs_extable.c
new file mode 100644
index 000000000000..dcac69f5928a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_subprogs_extable.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 8);
+ __type(key, __u32);
+ __type(value, __u64);
+} test_array SEC(".maps");
+
+unsigned int triggered;
+
+static __u64 test_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
+{
+ return 1;
+}
+
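+/* The volatile dereferences below may fault on the pointer returned from
+ * bpf_testmod_return_ptr, exercising exception-table fixups in programs
+ * that also call subprogs (via bpf_for_each_map_elem).
+ */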
+SEC("fexit/bpf_testmod_return_ptr")
+int BPF_PROG(handle_fexit_ret_subprogs, int arg, struct file *ret)
+{
+ *(volatile int *)ret;
+ *(volatile int *)&ret->f_mode;
+ bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
+ triggered++;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_return_ptr")
+int BPF_PROG(handle_fexit_ret_subprogs2, int arg, struct file *ret)
+{
+ *(volatile int *)ret;
+ *(volatile int *)&ret->f_mode;
+ bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
+ triggered++;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_return_ptr")
+int BPF_PROG(handle_fexit_ret_subprogs3, int arg, struct file *ret)
+{
+ *(volatile int *)ret;
+ *(volatile int *)&ret->f_mode;
+ bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
+ triggered++;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_subprogs_unused.c b/tools/testing/selftests/bpf/progs/test_subprogs_unused.c
new file mode 100644
index 000000000000..bc49e050d342
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_subprogs_unused.c
@@ -0,0 +1,21 @@
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+const char LICENSE[] SEC("license") = "GPL";
+
+__attribute__((unused)) __noinline int unused1(int x)
+{
+ return x + 1;
+}
+
+static __attribute__((unused)) __noinline int unused2(int x)
+{
+ return x + 2;
+}
+
+SEC("raw_tp/sys_enter")
+int main_prog(void *ctx)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_subskeleton.c b/tools/testing/selftests/bpf/progs/test_subskeleton.c
new file mode 100644
index 000000000000..006417974372
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_subskeleton.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* volatile to force a read, compiler may assume 0 otherwise */
+const volatile int rovar1;
+int out1;
+
+/* Override weak symbol in test_subskeleton_lib */
+int var5 = 5;
+
+extern volatile bool CONFIG_BPF_SYSCALL __kconfig;
+
+extern int lib_routine(void);
+
+SEC("raw_tp/sys_enter")
+int handler1(const void *ctx)
+{
+ (void) CONFIG_BPF_SYSCALL;
+
+ out1 = lib_routine() * rovar1;
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_subskeleton_lib.c b/tools/testing/selftests/bpf/progs/test_subskeleton_lib.c
new file mode 100644
index 000000000000..ecfafe812c36
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_subskeleton_lib.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* volatile to force a read */
+const volatile int var1;
+volatile int var2 = 1;
+struct {
+ int var3_1;
+ __s64 var3_2;
+} var3;
+int libout1;
+
+extern volatile bool CONFIG_BPF_SYSCALL __kconfig;
+
+int var4[4];
+
+__weak int var5 SEC(".data");
+
+/* Fully contained within library extern-and-definition */
+extern int var6;
+
+int var7 SEC(".data.custom");
+
+int (*fn_ptr)(void);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 16);
+} map1 SEC(".maps");
+
+extern struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 16);
+} map2 SEC(".maps");
+
+int lib_routine(void)
+{
+ __u32 key = 1, value = 2;
+
+ (void) CONFIG_BPF_SYSCALL;
+ bpf_map_update_elem(&map2, &key, &value, BPF_ANY);
+
+ libout1 = var1 + var2 + var3.var3_1 + var3.var3_2 + var5 + var6;
+ return libout1;
+}
+
+SEC("perf_event")
+int lib_perf_handler(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c b/tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c
new file mode 100644
index 000000000000..80238486b7ce
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+int var6 = 6;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 16);
+} map2 SEC(".maps");
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
index 553a282d816a..548660e299a5 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
@@ -9,9 +9,8 @@
#include <bpf/bpf_helpers.h>
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
+#include "bpf_compiler.h"
+#include "bpf_misc.h"
/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */
#define TCP_MEM_LOOPS 28 /* because 30 doesn't fit into 512 bytes of stack */
@@ -30,7 +29,7 @@ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
@@ -59,7 +58,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
index 2b64bc563a12..81249d119a8b 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
@@ -9,9 +9,8 @@
#include <bpf/bpf_helpers.h>
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
+#include "bpf_compiler.h"
+#include "bpf_misc.h"
/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */
#define TCP_MEM_LOOPS 20 /* because 30 doesn't fit into 512 bytes of stack */
@@ -30,7 +29,7 @@ static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
@@ -57,7 +56,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
index 5489823c83fc..bbdd08764789 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
@@ -9,16 +9,15 @@
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+#include "bpf_misc.h"
+
/* Max supported length of a string with unsigned long in base 10 (pow2 - 1). */
#define MAX_ULONG_STR_LEN 0xF
/* Max supported length of sysctl value string (pow2). */
#define MAX_VALUE_STR_LEN 0x40
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
const char tcp_mem_name[] = "net/ipv4/tcp_mem";
static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
{
@@ -31,7 +30,7 @@ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
@@ -57,7 +56,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
diff --git a/tools/testing/selftests/bpf/progs/test_task_pt_regs.c b/tools/testing/selftests/bpf/progs/test_task_pt_regs.c
new file mode 100644
index 000000000000..1926facba122
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_task_pt_regs.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define PT_REGS_SIZE sizeof(struct pt_regs)
+
+/*
+ * The kernel struct pt_regs isn't exported in its entirety to userspace.
+ * Pass it as an array to task_pt_regs.c
+ */
+char current_regs[PT_REGS_SIZE] = {};
+char ctx_regs[PT_REGS_SIZE] = {};
+int uprobe_res = 0;
+
+SEC("uprobe")
+int handle_uprobe(struct pt_regs *ctx)
+{
+ struct task_struct *current;
+ struct pt_regs *regs;
+
+ current = bpf_get_current_task_btf();
+ regs = (struct pt_regs *) bpf_task_pt_regs(current);
+ if (bpf_probe_read_kernel(current_regs, PT_REGS_SIZE, regs))
+ return 0;
+ if (bpf_probe_read_kernel(ctx_regs, PT_REGS_SIZE, ctx))
+ return 0;
+
+ /* Prove that uprobe was run */
+ uprobe_res = 1;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
new file mode 100644
index 000000000000..0b74b8bd22e8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Bytedance */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+const volatile int local_pid;
+const volatile __u64 cgid;
+int remote_pid;
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(tp_btf_run, struct task_struct *task, u64 clone_flags)
+{
+ struct cgroup *cgrp = NULL;
+ struct task_struct *acquired;
+
+ if (local_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ acquired = bpf_task_acquire(task);
+ if (!acquired)
+ return 0;
+
+ if (local_pid == acquired->tgid)
+ goto out;
+
+ cgrp = bpf_cgroup_from_id(cgid);
+ if (!cgrp)
+ goto out;
+
+ if (bpf_task_under_cgroup(acquired, cgrp))
+ remote_pid = acquired->tgid;
+
+out:
+ if (cgrp)
+ bpf_cgroup_release(cgrp);
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("lsm.s/bpf")
+int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ struct cgroup *cgrp = NULL;
+ struct task_struct *task;
+ int ret = 0;
+
+ task = bpf_get_current_task_btf();
+ if (local_pid != task->pid)
+ return 0;
+
+ if (cmd != BPF_LINK_CREATE)
+ return 0;
+
+ /* 1 is the root cgroup */
+ cgrp = bpf_cgroup_from_id(1);
+ if (!cgrp)
+ goto out;
+ if (!bpf_task_under_cgroup(task, cgrp))
+ ret = -1;
+ bpf_cgroup_release(cgrp);
+
+out:
+ return ret;
+}
+
+char _license[] SEC("license") = "GPL";
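A rough user-space outline for driving this program; the skeleton name and the way the cgroup id is obtained are assumptions, not taken from this patch. The read-only globals must be filled in before load, and a fork() from local_pid fires the task_newtask tracepoint:

/* Sketch only: skeleton accessors follow the usual libbpf convention. */
#include <unistd.h>
#include <sys/wait.h>
#include "test_task_under_cgroup.skel.h"

static int run_task_under_cgroup(unsigned long long cgroup_id)
{
	struct test_task_under_cgroup *skel;
	int status, ret = -1;
	pid_t pid;

	skel = test_task_under_cgroup__open();
	if (!skel)
		return -1;

	skel->rodata->local_pid = getpid();
	skel->rodata->cgid = cgroup_id;

	if (test_task_under_cgroup__load(skel) ||
	    test_task_under_cgroup__attach(skel))
		goto out;

	pid = fork();			/* fires tp_btf/task_newtask */
	if (pid == 0)
		_exit(0);
	if (pid > 0)
		waitpid(pid, &status, 0);

	ret = skel->bss->remote_pid == pid ? 0 : -1;
out:
	test_task_under_cgroup__destroy(skel);
	return ret;
}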
diff --git a/tools/testing/selftests/bpf/progs/test_tc_bpf.c b/tools/testing/selftests/bpf/progs/test_tc_bpf.c
new file mode 100644
index 000000000000..ef7da419632a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_bpf.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+
+/* Dummy prog to test TC-BPF API */
+
+SEC("tc")
+int cls(struct __sk_buff *skb)
+{
+ return 0;
+}
+
+/* Prog to verify tc-bpf without cap_sys_admin and cap_perfmon */
+SEC("tcx/ingress")
+int pkt_ptr(struct __sk_buff *skb)
+{
+ struct iphdr *iph = (void *)(long)skb->data + sizeof(struct ethhdr);
+
+ if ((long)(iph + 1) > (long)skb->data_end)
+ return 1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_tc_change_tail.c b/tools/testing/selftests/bpf/progs/test_tc_change_tail.c
new file mode 100644
index 000000000000..fcba8299f0bc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_change_tail.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE __PAGE_SIZE
+#endif
+#define BPF_SKB_MAX_LEN (PAGE_SIZE << 2)
+
+long change_tail_ret = 1;
+
+static __always_inline struct iphdr *parse_ip_header(struct __sk_buff *skb, int *ip_proto)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+ struct ethhdr *eth = data;
+ struct iphdr *iph;
+
+ /* Verify Ethernet header */
+ if ((void *)(data + sizeof(*eth)) > data_end)
+ return NULL;
+
+ /* Skip Ethernet header to get to IP header */
+ iph = (void *)(data + sizeof(struct ethhdr));
+
+ /* Verify IP header */
+ if ((void *)(data + sizeof(struct ethhdr) + sizeof(*iph)) > data_end)
+ return NULL;
+
+ /* Basic IP header validation */
+ if (iph->version != 4) /* Only support IPv4 */
+ return NULL;
+
+ if (iph->ihl < 5) /* Minimum IP header length */
+ return NULL;
+
+ *ip_proto = iph->protocol;
+ return iph;
+}
+
+static __always_inline struct udphdr *parse_udp_header(struct __sk_buff *skb, struct iphdr *iph)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *hdr = (void *)iph;
+ struct udphdr *udp;
+
+ /* Calculate UDP header position */
+ udp = hdr + (iph->ihl * 4);
+ hdr = (void *)udp;
+
+ /* Verify UDP header bounds */
+ if ((void *)(hdr + sizeof(*udp)) > data_end)
+ return NULL;
+
+ return udp;
+}
+
+SEC("tc/ingress")
+int change_tail(struct __sk_buff *skb)
+{
+ int len = skb->len;
+ struct udphdr *udp;
+ struct iphdr *iph;
+ void *data_end;
+ char *payload;
+ int ip_proto;
+
+ bpf_skb_pull_data(skb, len);
+
+ data_end = (void *)(long)skb->data_end;
+ iph = parse_ip_header(skb, &ip_proto);
+ if (!iph)
+ return TCX_PASS;
+
+ if (ip_proto != IPPROTO_UDP)
+ return TCX_PASS;
+
+ udp = parse_udp_header(skb, iph);
+ if (!udp)
+ return TCX_PASS;
+
+ payload = (char *)udp + (sizeof(struct udphdr));
+ if (payload + 1 > (char *)data_end)
+ return TCX_PASS;
+
+ if (payload[0] == 'T') { /* Trim the packet */
+ change_tail_ret = bpf_skb_change_tail(skb, len - 1, 0);
+ if (!change_tail_ret)
+ bpf_skb_change_tail(skb, len, 0);
+ return TCX_PASS;
+ } else if (payload[0] == 'G') { /* Grow the packet */
+ change_tail_ret = bpf_skb_change_tail(skb, len + 1, 0);
+ if (!change_tail_ret)
+ bpf_skb_change_tail(skb, len, 0);
+ return TCX_PASS;
+ } else if (payload[0] == 'E') { /* Error */
+ change_tail_ret = bpf_skb_change_tail(skb, BPF_SKB_MAX_LEN, 0);
+ return TCX_PASS;
+ } else if (payload[0] == 'Z') { /* Zero */
+ change_tail_ret = bpf_skb_change_tail(skb, 0, 0);
+ return TCX_PASS;
+ }
+ return TCX_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
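The ingress program dispatches purely on the first UDP payload byte ('T' trims one byte, 'G' grows by one byte, 'E' exercises the error path with BPF_SKB_MAX_LEN, 'Z' shrinks to zero), so a sender only needs one-byte datagrams. A hedged user-space sketch, with placeholder address and port:

/* Illustration only: send one byte so change_tail() takes the matching branch. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

static int send_change_tail_case(const char *dst_ip, int dst_port, char mode)
{
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(dst_port),
	};
	int fd, ret;

	if (inet_pton(AF_INET, dst_ip, &dst.sin_addr) != 1)
		return -1;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	/* mode is one of 'T', 'G', 'E' or 'Z', as parsed by change_tail() */
	ret = sendto(fd, &mode, 1, 0, (struct sockaddr *)&dst, sizeof(dst));
	close(fd);

	return ret == 1 ? 0 : -1;
}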
diff --git a/tools/testing/selftests/bpf/progs/test_tc_dtime.c b/tools/testing/selftests/bpf/progs/test_tc_dtime.c
new file mode 100644
index 000000000000..ca8e8734d901
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_dtime.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022 Meta
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <linux/stddef.h>
+#include <linux/pkt_cls.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+/* veth_src --- veth_src_fwd --- veth_dst_fwd --- veth_dst
+ * | |
+ * ns_src | ns_fwd | ns_dst
+ *
+ * ns_src and ns_dst: ENDHOST namespace
+ * ns_fwd: Forwarding namespace
+ */
+
+#define ctx_ptr(field) (void *)(long)(field)
+
+#define ip4_src __bpf_htonl(0xac100164) /* 172.16.1.100 */
+#define ip4_dst __bpf_htonl(0xac100264) /* 172.16.2.100 */
+
+#define ip6_src { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x01, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe }
+#define ip6_dst { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x02, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe }
+
+#define v6_equal(a, b) (a.s6_addr32[0] == b.s6_addr32[0] && \
+ a.s6_addr32[1] == b.s6_addr32[1] && \
+ a.s6_addr32[2] == b.s6_addr32[2] && \
+ a.s6_addr32[3] == b.s6_addr32[3])
+
+volatile const __u32 IFINDEX_SRC;
+volatile const __u32 IFINDEX_DST;
+
+#define EGRESS_ENDHOST_MAGIC 0x0b9fbeef
+#define INGRESS_FWDNS_MAGIC 0x1b9fbeef
+#define EGRESS_FWDNS_MAGIC 0x2b9fbeef
+
+enum {
+ INGRESS_FWDNS_P100,
+ INGRESS_FWDNS_P101,
+ EGRESS_FWDNS_P100,
+ EGRESS_FWDNS_P101,
+ INGRESS_ENDHOST,
+ EGRESS_ENDHOST,
+ SET_DTIME,
+ __MAX_CNT,
+};
+
+enum {
+ TCP_IP6_CLEAR_DTIME,
+ TCP_IP4,
+ TCP_IP6,
+ UDP_IP4,
+ UDP_IP6,
+ TCP_IP4_RT_FWD,
+ TCP_IP6_RT_FWD,
+ UDP_IP4_RT_FWD,
+ UDP_IP6_RT_FWD,
+ UKN_TEST,
+ __NR_TESTS,
+};
+
+enum {
+ SRC_NS = 1,
+ DST_NS,
+};
+
+__u32 dtimes[__NR_TESTS][__MAX_CNT] = {};
+__u32 errs[__NR_TESTS][__MAX_CNT] = {};
+__u32 test = 0;
+
+static void inc_dtimes(__u32 idx)
+{
+ if (test < __NR_TESTS)
+ dtimes[test][idx]++;
+ else
+ dtimes[UKN_TEST][idx]++;
+}
+
+static void inc_errs(__u32 idx)
+{
+ if (test < __NR_TESTS)
+ errs[test][idx]++;
+ else
+ errs[UKN_TEST][idx]++;
+}
+
+static int skb_proto(int type)
+{
+ return type & 0xff;
+}
+
+static int skb_ns(int type)
+{
+ return (type >> 8) & 0xff;
+}
+
+static bool fwdns_clear_dtime(void)
+{
+ return test == TCP_IP6_CLEAR_DTIME;
+}
+
+static bool bpf_fwd(void)
+{
+ return test < TCP_IP4_RT_FWD;
+}
+
+static __u8 get_proto(void)
+{
+ switch (test) {
+ case UDP_IP4:
+ case UDP_IP6:
+ case UDP_IP4_RT_FWD:
+ case UDP_IP6_RT_FWD:
+ return IPPROTO_UDP;
+ default:
+ return IPPROTO_TCP;
+ }
+}
+
+/* -1: parse error: TC_ACT_SHOT
+ * 0: not testing traffic: TC_ACT_OK
+ * >0: first byte is the inet_proto, second byte has the netns
+ * of the sender
+ */
+static int skb_get_type(struct __sk_buff *skb)
+{
+ __u16 dst_ns_port = __bpf_htons(50000 + test);
+ void *data_end = ctx_ptr(skb->data_end);
+ void *data = ctx_ptr(skb->data);
+ __u8 inet_proto = 0, ns = 0;
+ struct ipv6hdr *ip6h;
+ __u16 sport, dport;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ struct udphdr *uh;
+ void *trans;
+
+ switch (skb->protocol) {
+ case __bpf_htons(ETH_P_IP):
+ iph = data + sizeof(struct ethhdr);
+ if (iph + 1 > data_end)
+ return -1;
+ if (iph->saddr == ip4_src)
+ ns = SRC_NS;
+ else if (iph->saddr == ip4_dst)
+ ns = DST_NS;
+ inet_proto = iph->protocol;
+ trans = iph + 1;
+ break;
+ case __bpf_htons(ETH_P_IPV6):
+ ip6h = data + sizeof(struct ethhdr);
+ if (ip6h + 1 > data_end)
+ return -1;
+ if (v6_equal(ip6h->saddr, (struct in6_addr){{ip6_src}}))
+ ns = SRC_NS;
+ else if (v6_equal(ip6h->saddr, (struct in6_addr){{ip6_dst}}))
+ ns = DST_NS;
+ inet_proto = ip6h->nexthdr;
+ trans = ip6h + 1;
+ break;
+ default:
+ return 0;
+ }
+
+ /* Skip the skb if it is not from src_ns or dst_ns,
+ * or if it is not the testing IPPROTO.
+ */
+ if (!ns || inet_proto != get_proto())
+ return 0;
+
+ switch (inet_proto) {
+ case IPPROTO_TCP:
+ th = trans;
+ if (th + 1 > data_end)
+ return -1;
+ sport = th->source;
+ dport = th->dest;
+ break;
+ case IPPROTO_UDP:
+ uh = trans;
+ if (uh + 1 > data_end)
+ return -1;
+ sport = uh->source;
+ dport = uh->dest;
+ break;
+ default:
+ return 0;
+ }
+
+ /* The skb is the testing traffic */
+ if ((ns == SRC_NS && dport == dst_ns_port) ||
+ (ns == DST_NS && sport == dst_ns_port))
+ return (ns << 8 | inet_proto);
+
+ return 0;
+}
+
+/* format: direction@iface@netns
+ * egress@veth_(src|dst)@ns_(src|dst)
+ */
+SEC("tc")
+int egress_host(struct __sk_buff *skb)
+{
+ int skb_type;
+
+ skb_type = skb_get_type(skb);
+ if (skb_type == -1)
+ return TC_ACT_SHOT;
+ if (!skb_type)
+ return TC_ACT_OK;
+
+ if (skb_proto(skb_type) == IPPROTO_TCP) {
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC &&
+ skb->tstamp)
+ inc_dtimes(EGRESS_ENDHOST);
+ else
+ inc_errs(EGRESS_ENDHOST);
+ } else if (skb_proto(skb_type) == IPPROTO_UDP) {
+ if (skb->tstamp_type == BPF_SKB_CLOCK_TAI &&
+ skb->tstamp)
+ inc_dtimes(EGRESS_ENDHOST);
+ else
+ inc_errs(EGRESS_ENDHOST);
+ } else {
+ if (skb->tstamp_type == BPF_SKB_CLOCK_REALTIME &&
+ skb->tstamp)
+ inc_errs(EGRESS_ENDHOST);
+ }
+
+ skb->tstamp = EGRESS_ENDHOST_MAGIC;
+
+ return TC_ACT_OK;
+}
+
+/* ingress@veth_(src|dst)@ns_(src|dst) */
+SEC("tc")
+int ingress_host(struct __sk_buff *skb)
+{
+ int skb_type;
+
+ skb_type = skb_get_type(skb);
+ if (skb_type == -1)
+ return TC_ACT_SHOT;
+ if (!skb_type)
+ return TC_ACT_OK;
+
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC &&
+ skb->tstamp == EGRESS_FWDNS_MAGIC)
+ inc_dtimes(INGRESS_ENDHOST);
+ else
+ inc_errs(INGRESS_ENDHOST);
+
+ return TC_ACT_OK;
+}
+
+/* ingress@veth_(src|dst)_fwd@ns_fwd priority 100 */
+SEC("tc")
+int ingress_fwdns_prio100(struct __sk_buff *skb)
+{
+ int skb_type;
+
+ skb_type = skb_get_type(skb);
+ if (skb_type == -1)
+ return TC_ACT_SHOT;
+ if (!skb_type)
+ return TC_ACT_OK;
+
+ /* delivery_time is only available to the ingress
+ * if the tc-bpf checks the skb->tstamp_type.
+ */
+ if (skb->tstamp == EGRESS_ENDHOST_MAGIC)
+ inc_errs(INGRESS_FWDNS_P100);
+
+ if (fwdns_clear_dtime())
+ skb->tstamp = 0;
+
+ return TC_ACT_UNSPEC;
+}
+
+/* egress@veth_(src|dst)_fwd@ns_fwd priority 100 */
+SEC("tc")
+int egress_fwdns_prio100(struct __sk_buff *skb)
+{
+ int skb_type;
+
+ skb_type = skb_get_type(skb);
+ if (skb_type == -1)
+ return TC_ACT_SHOT;
+ if (!skb_type)
+ return TC_ACT_OK;
+
+ /* delivery_time is always available to egress even if
+ * the tc-bpf did not use the tstamp_type.
+ */
+ if (skb->tstamp == INGRESS_FWDNS_MAGIC)
+ inc_dtimes(EGRESS_FWDNS_P100);
+ else
+ inc_errs(EGRESS_FWDNS_P100);
+
+ if (fwdns_clear_dtime())
+ skb->tstamp = 0;
+
+ return TC_ACT_UNSPEC;
+}
+
+/* ingress@veth_(src|dst)_fwd@ns_fwd priority 101 */
+SEC("tc")
+int ingress_fwdns_prio101(struct __sk_buff *skb)
+{
+ int skb_type;
+
+ skb_type = skb_get_type(skb);
+ if (skb_type == -1 || !skb_type)
+ /* Should have been handled in prio100 */
+ return TC_ACT_SHOT;
+
+ if (skb->tstamp_type) {
+ if (fwdns_clear_dtime() ||
+ (skb->tstamp_type != BPF_SKB_CLOCK_MONOTONIC &&
+ skb->tstamp_type != BPF_SKB_CLOCK_TAI) ||
+ skb->tstamp != EGRESS_ENDHOST_MAGIC)
+ inc_errs(INGRESS_FWDNS_P101);
+ else
+ inc_dtimes(INGRESS_FWDNS_P101);
+ } else {
+ if (!fwdns_clear_dtime())
+ inc_errs(INGRESS_FWDNS_P101);
+ }
+
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC) {
+ skb->tstamp = INGRESS_FWDNS_MAGIC;
+ } else {
+ if (bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
+ BPF_SKB_CLOCK_MONOTONIC))
+ inc_errs(SET_DTIME);
+ }
+
+ if (skb_ns(skb_type) == SRC_NS)
+ return bpf_fwd() ?
+ bpf_redirect_neigh(IFINDEX_DST, NULL, 0, 0) : TC_ACT_OK;
+ else
+ return bpf_fwd() ?
+ bpf_redirect_neigh(IFINDEX_SRC, NULL, 0, 0) : TC_ACT_OK;
+}
+
+/* egress@veth_(src|dst)_fwd@ns_fwd priority 101 */
+SEC("tc")
+int egress_fwdns_prio101(struct __sk_buff *skb)
+{
+ int skb_type;
+
+ skb_type = skb_get_type(skb);
+ if (skb_type == -1 || !skb_type)
+ /* Should have been handled in prio100 */
+ return TC_ACT_SHOT;
+
+ if (skb->tstamp_type) {
+ if (fwdns_clear_dtime() ||
+ skb->tstamp_type != BPF_SKB_CLOCK_MONOTONIC ||
+ skb->tstamp != INGRESS_FWDNS_MAGIC)
+ inc_errs(EGRESS_FWDNS_P101);
+ else
+ inc_dtimes(EGRESS_FWDNS_P101);
+ } else {
+ if (!fwdns_clear_dtime())
+ inc_errs(EGRESS_FWDNS_P101);
+ }
+
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC) {
+ skb->tstamp = EGRESS_FWDNS_MAGIC;
+ } else {
+ if (bpf_skb_set_tstamp(skb, EGRESS_FWDNS_MAGIC,
+ BPF_SKB_CLOCK_MONOTONIC))
+ inc_errs(SET_DTIME);
+ }
+
+ return TC_ACT_OK;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tc_edt.c b/tools/testing/selftests/bpf/progs/test_tc_edt.c
index bf28814bfde5..950a70b61e74 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_edt.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_edt.c
@@ -17,12 +17,12 @@
#define THROTTLE_RATE_BPS (5 * 1000 * 1000)
/* flow_key => last_tstamp timestamp used */
-struct bpf_map_def SEC("maps") flow_map = {
- .type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(uint32_t),
- .value_size = sizeof(uint64_t),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, uint32_t);
+ __type(value, uint64_t);
+ __uint(max_entries, 1);
+} flow_map SEC(".maps");
static inline int throttle_flow(struct __sk_buff *skb)
{
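The hunk above only changes how flow_map is declared (legacy bpf_map_def to the BTF-defined .maps style); lookups and updates from BPF code keep the same helper calls. A small illustrative helper with placeholder key and timestamp handling, not part of the patch:

/* Illustration only: flow_map access is unchanged by the declaration style. */
static inline void flow_map_touch(void)
{
	uint32_t key = 0;
	uint64_t now = bpf_ktime_get_ns();
	uint64_t *last = bpf_map_lookup_elem(&flow_map, &key);

	if (!last || *last < now)
		bpf_map_update_elem(&flow_map, &key, &now, BPF_ANY);
}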
diff --git a/tools/testing/selftests/bpf/progs/test_tc_link.c b/tools/testing/selftests/bpf/progs/test_tc_link.c
new file mode 100644
index 000000000000..630f12e51b07
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_link.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+#include <stdbool.h>
+
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/stddef.h>
+#include <linux/if_packet.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+char LICENSE[] SEC("license") = "GPL";
+
+bool seen_tc1;
+bool seen_tc2;
+bool seen_tc3;
+bool seen_tc4;
+bool seen_tc5;
+bool seen_tc6;
+bool seen_tc7;
+bool seen_tc8;
+
+bool set_type;
+
+bool seen_eth;
+bool seen_host;
+bool seen_mcast;
+
+int mark, prio;
+unsigned short headroom, tailroom;
+
+SEC("tc/ingress")
+int tc1(struct __sk_buff *skb)
+{
+ struct ethhdr eth = {};
+
+ if (skb->protocol != __bpf_constant_htons(ETH_P_IP))
+ goto out;
+ if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)))
+ goto out;
+ seen_eth = eth.h_proto == bpf_htons(ETH_P_IP);
+ seen_host = skb->pkt_type == PACKET_HOST;
+ if (seen_host && set_type) {
+ eth.h_dest[0] = 4;
+ if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0))
+ goto fail;
+ bpf_skb_change_type(skb, PACKET_MULTICAST);
+ }
+out:
+ seen_tc1 = true;
+fail:
+ return TCX_NEXT;
+}
+
+SEC("tc/egress")
+int tc2(struct __sk_buff *skb)
+{
+ seen_tc2 = true;
+ return TCX_NEXT;
+}
+
+SEC("tc/egress")
+int tc3(struct __sk_buff *skb)
+{
+ seen_tc3 = true;
+ return TCX_NEXT;
+}
+
+SEC("tc/egress")
+int tc4(struct __sk_buff *skb)
+{
+ seen_tc4 = true;
+ return TCX_NEXT;
+}
+
+SEC("tc/egress")
+int tc5(struct __sk_buff *skb)
+{
+ seen_tc5 = true;
+ return TCX_PASS;
+}
+
+SEC("tc/egress")
+int tc6(struct __sk_buff *skb)
+{
+ seen_tc6 = true;
+ return TCX_PASS;
+}
+
+SEC("tc/ingress")
+int tc7(struct __sk_buff *skb)
+{
+ struct ethhdr eth = {};
+
+ if (skb->protocol != __bpf_constant_htons(ETH_P_IP))
+ goto out;
+ if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)))
+ goto out;
+ if (eth.h_dest[0] == 4 && set_type) {
+ seen_mcast = skb->pkt_type == PACKET_MULTICAST;
+ bpf_skb_change_type(skb, PACKET_HOST);
+ }
+out:
+ seen_tc7 = true;
+ return TCX_PASS;
+}
+
+struct sk_buff {
+ struct net_device *dev;
+};
+
+struct net_device {
+ unsigned short needed_headroom;
+ unsigned short needed_tailroom;
+};
+
+SEC("tc/egress")
+int tc8(struct __sk_buff *skb)
+{
+ struct net_device *dev = BPF_CORE_READ((struct sk_buff *)skb, dev);
+
+ seen_tc8 = true;
+ mark = skb->mark;
+ prio = skb->priority;
+ headroom = BPF_CORE_READ(dev, needed_headroom);
+ tailroom = BPF_CORE_READ(dev, needed_tailroom);
+ return TCX_PASS;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh.c b/tools/testing/selftests/bpf/progs/test_tc_neigh.c
index b985ac4e7a81..de15155f2609 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_neigh.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_neigh.c
@@ -33,17 +33,8 @@
a.s6_addr32[3] == b.s6_addr32[3])
#endif
-enum {
- dev_src,
- dev_dst,
-};
-
-struct bpf_map_def SEC("maps") ifindex_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 2,
-};
+volatile const __u32 IFINDEX_SRC;
+volatile const __u32 IFINDEX_DST;
static __always_inline bool is_remote_ep_v4(struct __sk_buff *skb,
__be32 addr)
@@ -79,14 +70,8 @@ static __always_inline bool is_remote_ep_v6(struct __sk_buff *skb,
return v6_equal(ip6h->daddr, addr);
}
-static __always_inline int get_dev_ifindex(int which)
-{
- int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which);
-
- return ifindex ? *ifindex : 0;
-}
-
-SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+SEC("tc")
+int tc_chk(struct __sk_buff *skb)
{
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
@@ -98,7 +83,8 @@ SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
return !raw[0] && !raw[1] && !raw[2] ? TC_ACT_SHOT : TC_ACT_OK;
}
-SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+SEC("tc")
+int tc_dst(struct __sk_buff *skb)
{
__u8 zero[ETH_ALEN * 2];
bool redirect = false;
@@ -108,7 +94,7 @@ SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_src));
break;
case __bpf_constant_htons(ETH_P_IPV6):
- redirect = is_remote_ep_v6(skb, (struct in6_addr)ip6_src);
+ redirect = is_remote_ep_v6(skb, (struct in6_addr){{ip6_src}});
break;
}
@@ -119,10 +105,11 @@ SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0)
return TC_ACT_SHOT;
- return bpf_redirect_neigh(get_dev_ifindex(dev_src), NULL, 0, 0);
+ return bpf_redirect_neigh(IFINDEX_SRC, NULL, 0, 0);
}
-SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+SEC("tc")
+int tc_src(struct __sk_buff *skb)
{
__u8 zero[ETH_ALEN * 2];
bool redirect = false;
@@ -132,7 +119,7 @@ SEC("src_ingress") int tc_src(struct __sk_buff *skb)
redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_dst));
break;
case __bpf_constant_htons(ETH_P_IPV6):
- redirect = is_remote_ep_v6(skb, (struct in6_addr)ip6_dst);
+ redirect = is_remote_ep_v6(skb, (struct in6_addr){{ip6_dst}});
break;
}
@@ -143,7 +130,7 @@ SEC("src_ingress") int tc_src(struct __sk_buff *skb)
if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0)
return TC_ACT_SHOT;
- return bpf_redirect_neigh(get_dev_ifindex(dev_dst), NULL, 0, 0);
+ return bpf_redirect_neigh(IFINDEX_DST, NULL, 0, 0);
}
char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
index d82ed3457030..ec4cce19362d 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
@@ -75,7 +75,8 @@ static __always_inline int fill_fib_params_v6(struct __sk_buff *skb,
return 0;
}
-SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+SEC("tc")
+int tc_chk(struct __sk_buff *skb)
{
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
@@ -142,12 +143,14 @@ static __always_inline int tc_redir(struct __sk_buff *skb)
/* these are identical, but keep them separate for compatibility with the
* section names expected by test_tc_redirect.sh
*/
-SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+SEC("tc")
+int tc_dst(struct __sk_buff *skb)
{
return tc_redir(skb);
}
-SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+SEC("tc")
+int tc_src(struct __sk_buff *skb)
{
return tc_redir(skb);
}
diff --git a/tools/testing/selftests/bpf/progs/test_tc_peer.c b/tools/testing/selftests/bpf/progs/test_tc_peer.c
index fc84a7685aa2..365eacb5dc34 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_peer.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_peer.c
@@ -5,41 +5,59 @@
#include <linux/bpf.h>
#include <linux/stddef.h>
#include <linux/pkt_cls.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
#include <bpf/bpf_helpers.h>
-enum {
- dev_src,
- dev_dst,
-};
+volatile const __u32 IFINDEX_SRC;
+volatile const __u32 IFINDEX_DST;
-struct bpf_map_def SEC("maps") ifindex_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 2,
-};
+static const __u8 src_mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
+static const __u8 dst_mac[] = {0x00, 0x22, 0x33, 0x44, 0x55, 0x66};
-static __always_inline int get_dev_ifindex(int which)
+SEC("tc")
+int tc_chk(struct __sk_buff *skb)
{
- int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which);
+ return TC_ACT_SHOT;
+}
- return ifindex ? *ifindex : 0;
+SEC("tc")
+int tc_dst(struct __sk_buff *skb)
+{
+ return bpf_redirect_peer(IFINDEX_SRC, 0);
}
-SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+SEC("tc")
+int tc_src(struct __sk_buff *skb)
{
- return TC_ACT_SHOT;
+ return bpf_redirect_peer(IFINDEX_DST, 0);
}
-SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+SEC("tc")
+int tc_dst_l3(struct __sk_buff *skb)
{
- return bpf_redirect_peer(get_dev_ifindex(dev_src), 0);
+ return bpf_redirect(IFINDEX_SRC, 0);
}
-SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+SEC("tc")
+int tc_src_l3(struct __sk_buff *skb)
{
- return bpf_redirect_peer(get_dev_ifindex(dev_dst), 0);
+ __u16 proto = skb->protocol;
+
+ if (bpf_skb_change_head(skb, ETH_HLEN, 0) != 0)
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_store_bytes(skb, 0, &src_mac, ETH_ALEN, 0) != 0)
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_store_bytes(skb, ETH_ALEN, &dst_mac, ETH_ALEN, 0) != 0)
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_store_bytes(skb, ETH_ALEN + ETH_ALEN, &proto, sizeof(__u16), 0) != 0)
+ return TC_ACT_SHOT;
+
+ return bpf_redirect_peer(IFINDEX_DST, 0);
}
char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
index 37bce7a7c394..404124a93892 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
@@ -19,19 +19,41 @@
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
+#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
static const int cfg_port = 8000;
static const int cfg_udp_src = 20000;
+#define L2_PAD_SZ (sizeof(struct vxlanhdr) + ETH_HLEN)
+
#define UDP_PORT 5555
#define MPLS_OVER_UDP_PORT 6635
#define ETH_OVER_UDP_PORT 7777
+#define VXLAN_UDP_PORT 8472
+
+#define EXTPROTO_VXLAN 0x1
+
+#define VXLAN_N_VID (1u << 24)
+#define VXLAN_VNI_MASK bpf_htonl((VXLAN_N_VID - 1) << 8)
+#define VXLAN_FLAGS 0x8
+#define VXLAN_VNI 1
+
+#ifndef NEXTHDR_DEST
+#define NEXTHDR_DEST 60
+#endif
/* MPLS label 1000 with S bit (last label) set and ttl of 255. */
static const __u32 mpls_label = __bpf_constant_htonl(1000 << 12 |
MPLS_LS_S_MASK | 0xff);
+struct vxlanhdr {
+ __be32 vx_flags;
+ __be32 vx_vni;
+} __attribute__((packed));
+
struct gre_hdr {
__be16 flags;
__be16 protocol;
@@ -45,13 +67,13 @@ union l4hdr {
struct v4hdr {
struct iphdr ip;
union l4hdr l4hdr;
- __u8 pad[16]; /* enough space for L2 header */
+ __u8 pad[L2_PAD_SZ]; /* space for L2 header / vxlan header ... */
} __attribute__((packed));
struct v6hdr {
struct ipv6hdr ip;
union l4hdr l4hdr;
- __u8 pad[16]; /* enough space for L2 header */
+ __u8 pad[L2_PAD_SZ]; /* space for L2 header / vxlan header ... */
} __attribute__((packed));
static __always_inline void set_ipv4_csum(struct iphdr *iph)
@@ -62,21 +84,22 @@ static __always_inline void set_ipv4_csum(struct iphdr *iph)
iph->check = 0;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0, csum = 0; i < sizeof(*iph) >> 1; i++)
csum += *iph16++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
}
-static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
- __u16 l2_proto)
+static __always_inline int __encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
+ __u16 l2_proto, __u16 ext_proto)
{
__u16 udp_dst = UDP_PORT;
struct iphdr iph_inner;
struct v4hdr h_outer;
struct tcphdr tcph;
int olen, l2_len;
+ __u8 *l2_hdr = NULL;
int tcp_off;
__u64 flags;
@@ -141,7 +164,11 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
break;
case ETH_P_TEB:
l2_len = ETH_HLEN;
- udp_dst = ETH_OVER_UDP_PORT;
+ if (ext_proto & EXTPROTO_VXLAN) {
+ udp_dst = VXLAN_UDP_PORT;
+ l2_len += sizeof(struct vxlanhdr);
+ } else
+ udp_dst = ETH_OVER_UDP_PORT;
break;
}
flags |= BPF_F_ADJ_ROOM_ENCAP_L2(l2_len);
@@ -171,14 +198,26 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
}
/* add L2 encap (if specified) */
+ l2_hdr = (__u8 *)&h_outer + olen;
switch (l2_proto) {
case ETH_P_MPLS_UC:
- *((__u32 *)((__u8 *)&h_outer + olen)) = mpls_label;
+ *(__u32 *)l2_hdr = mpls_label;
break;
case ETH_P_TEB:
- if (bpf_skb_load_bytes(skb, 0, (__u8 *)&h_outer + olen,
- ETH_HLEN))
+ flags |= BPF_F_ADJ_ROOM_ENCAP_L2_ETH;
+
+ if (ext_proto & EXTPROTO_VXLAN) {
+ struct vxlanhdr *vxlan_hdr = (struct vxlanhdr *)l2_hdr;
+
+ vxlan_hdr->vx_flags = VXLAN_FLAGS;
+ vxlan_hdr->vx_vni = bpf_htonl((VXLAN_VNI & VXLAN_VNI_MASK) << 8);
+
+ l2_hdr += sizeof(struct vxlanhdr);
+ }
+
+ if (bpf_skb_load_bytes(skb, 0, l2_hdr, ETH_HLEN))
return TC_ACT_SHOT;
+
break;
}
olen += l2_len;
@@ -214,14 +253,21 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
return TC_ACT_OK;
}
-static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
+static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
__u16 l2_proto)
{
+ return __encap_ipv4(skb, encap_proto, l2_proto, 0);
+}
+
+static __always_inline int __encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
+ __u16 l2_proto, __u16 ext_proto)
+{
__u16 udp_dst = UDP_PORT;
struct ipv6hdr iph_inner;
struct v6hdr h_outer;
struct tcphdr tcph;
int olen, l2_len;
+ __u8 *l2_hdr = NULL;
__u16 tot_len;
__u64 flags;
@@ -249,7 +295,11 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
break;
case ETH_P_TEB:
l2_len = ETH_HLEN;
- udp_dst = ETH_OVER_UDP_PORT;
+ if (ext_proto & EXTPROTO_VXLAN) {
+ udp_dst = VXLAN_UDP_PORT;
+ l2_len += sizeof(struct vxlanhdr);
+ } else
+ udp_dst = ETH_OVER_UDP_PORT;
break;
}
flags |= BPF_F_ADJ_ROOM_ENCAP_L2(l2_len);
@@ -267,7 +317,7 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src);
h_outer.l4hdr.udp.dest = bpf_htons(udp_dst);
tot_len = bpf_ntohs(iph_inner.payload_len) + sizeof(iph_inner) +
- sizeof(h_outer.l4hdr.udp);
+ sizeof(h_outer.l4hdr.udp) + l2_len;
h_outer.l4hdr.udp.check = 0;
h_outer.l4hdr.udp.len = bpf_htons(tot_len);
break;
@@ -278,13 +328,24 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
}
/* add L2 encap (if specified) */
+ l2_hdr = (__u8 *)&h_outer + olen;
switch (l2_proto) {
case ETH_P_MPLS_UC:
- *((__u32 *)((__u8 *)&h_outer + olen)) = mpls_label;
+ *(__u32 *)l2_hdr = mpls_label;
break;
case ETH_P_TEB:
- if (bpf_skb_load_bytes(skb, 0, (__u8 *)&h_outer + olen,
- ETH_HLEN))
+ flags |= BPF_F_ADJ_ROOM_ENCAP_L2_ETH;
+
+ if (ext_proto & EXTPROTO_VXLAN) {
+ struct vxlanhdr *vxlan_hdr = (struct vxlanhdr *)l2_hdr;
+
+ vxlan_hdr->vx_flags = VXLAN_FLAGS;
+ vxlan_hdr->vx_vni = bpf_htonl((VXLAN_VNI & VXLAN_VNI_MASK) << 8);
+
+ l2_hdr += sizeof(struct vxlanhdr);
+ }
+
+ if (bpf_skb_load_bytes(skb, 0, l2_hdr, ETH_HLEN))
return TC_ACT_SHOT;
break;
}
@@ -309,6 +370,67 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
return TC_ACT_OK;
}
+static int encap_ipv6_ipip6(struct __sk_buff *skb)
+{
+ struct iphdr iph_inner;
+ struct v6hdr h_outer;
+ struct tcphdr tcph;
+ struct ethhdr eth;
+ __u64 flags;
+ int olen;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
+ sizeof(iph_inner)) < 0)
+ return TC_ACT_OK;
+
+ /* filter only packets we want */
+ if (bpf_skb_load_bytes(skb, ETH_HLEN + (iph_inner.ihl << 2),
+ &tcph, sizeof(tcph)) < 0)
+ return TC_ACT_OK;
+
+ if (tcph.dest != __bpf_constant_htons(cfg_port))
+ return TC_ACT_OK;
+
+ olen = sizeof(h_outer.ip);
+
+ flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV6;
+
+ /* add room between mac and network header */
+ if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags))
+ return TC_ACT_SHOT;
+
+ /* prepare new outer network header */
+ memset(&h_outer.ip, 0, sizeof(h_outer.ip));
+ h_outer.ip.version = 6;
+ h_outer.ip.hop_limit = iph_inner.ttl;
+ h_outer.ip.saddr.s6_addr[1] = 0xfd;
+ h_outer.ip.saddr.s6_addr[15] = 1;
+ h_outer.ip.daddr.s6_addr[1] = 0xfd;
+ h_outer.ip.daddr.s6_addr[15] = 2;
+ h_outer.ip.payload_len = iph_inner.tot_len;
+ h_outer.ip.nexthdr = IPPROTO_IPIP;
+
+ /* store new outer network header */
+ if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen,
+ BPF_F_INVALIDATE_HASH) < 0)
+ return TC_ACT_SHOT;
+
+ /* update eth->h_proto */
+ if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)) < 0)
+ return TC_ACT_SHOT;
+ eth.h_proto = bpf_htons(ETH_P_IPV6);
+ if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0) < 0)
+ return TC_ACT_SHOT;
+
+ return TC_ACT_OK;
+}
+
+static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
+ __u16 l2_proto)
+{
+ return __encap_ipv6(skb, encap_proto, l2_proto, 0);
+}
+
SEC("encap_ipip_none")
int __encap_ipip_none(struct __sk_buff *skb)
{
@@ -372,6 +494,17 @@ int __encap_udp_eth(struct __sk_buff *skb)
return TC_ACT_OK;
}
+SEC("encap_vxlan_eth")
+int __encap_vxlan_eth(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+ return __encap_ipv4(skb, IPPROTO_UDP,
+ ETH_P_TEB,
+ EXTPROTO_VXLAN);
+ else
+ return TC_ACT_OK;
+}
+
SEC("encap_sit_none")
int __encap_sit_none(struct __sk_buff *skb)
{
@@ -390,6 +523,15 @@ int __encap_ip6tnl_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
+SEC("encap_ipip6_none")
+int __encap_ipip6_none(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+ return encap_ipv6_ipip6(skb);
+ else
+ return TC_ACT_OK;
+}
+
SEC("encap_ip6gre_none")
int __encap_ip6gre_none(struct __sk_buff *skb)
{
@@ -444,16 +586,46 @@ int __encap_ip6udp_eth(struct __sk_buff *skb)
return TC_ACT_OK;
}
+SEC("encap_ip6vxlan_eth")
+int __encap_ip6vxlan_eth(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+ return __encap_ipv6(skb, IPPROTO_UDP,
+ ETH_P_TEB,
+ EXTPROTO_VXLAN);
+ else
+ return TC_ACT_OK;
+}
+
static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
{
- char buf[sizeof(struct v6hdr)];
+ __u64 flags = BPF_F_ADJ_ROOM_FIXED_GSO;
+ struct ipv6_opt_hdr ip6_opt_hdr;
struct gre_hdr greh;
struct udphdr udph;
int olen = len;
switch (proto) {
case IPPROTO_IPIP:
+ flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV4;
+ break;
case IPPROTO_IPV6:
+ flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV6;
+ break;
+ case NEXTHDR_DEST:
+ if (bpf_skb_load_bytes(skb, off + len, &ip6_opt_hdr,
+ sizeof(ip6_opt_hdr)) < 0)
+ return TC_ACT_OK;
+ switch (ip6_opt_hdr.nexthdr) {
+ case IPPROTO_IPIP:
+ flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV4;
+ break;
+ case IPPROTO_IPV6:
+ flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV6;
+ break;
+ default:
+ return TC_ACT_OK;
+ }
break;
case IPPROTO_GRE:
olen += sizeof(struct gre_hdr);
@@ -479,14 +651,16 @@ static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
case ETH_OVER_UDP_PORT:
olen += ETH_HLEN;
break;
+ case VXLAN_UDP_PORT:
+ olen += ETH_HLEN + sizeof(struct vxlanhdr);
+ break;
}
break;
default:
return TC_ACT_OK;
}
- if (bpf_skb_adjust_room(skb, -olen, BPF_ADJ_ROOM_MAC,
- BPF_F_ADJ_ROOM_FIXED_GSO))
+ if (bpf_skb_adjust_room(skb, -olen, BPF_ADJ_ROOM_MAC, flags))
return TC_ACT_SHOT;
return TC_ACT_OK;
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c b/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c
deleted file mode 100644
index 47cbe2eeae43..000000000000
--- a/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c
+++ /dev/null
@@ -1,167 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018 Facebook
-// Copyright (c) 2019 Cloudflare
-
-#include <string.h>
-
-#include <linux/bpf.h>
-#include <linux/pkt_cls.h>
-#include <linux/if_ether.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <sys/socket.h>
-#include <linux/tcp.h>
-
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_endian.h>
-
-struct bpf_map_def SEC("maps") results = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u32),
- .max_entries = 3,
-};
-
-static __always_inline __s64 gen_syncookie(void *data_end, struct bpf_sock *sk,
- void *iph, __u32 ip_size,
- struct tcphdr *tcph)
-{
- __u32 thlen = tcph->doff * 4;
-
- if (tcph->syn && !tcph->ack) {
- // packet should only have an MSS option
- if (thlen != 24)
- return 0;
-
- if ((void *)tcph + thlen > data_end)
- return 0;
-
- return bpf_tcp_gen_syncookie(sk, iph, ip_size, tcph, thlen);
- }
- return 0;
-}
-
-static __always_inline void check_syncookie(void *ctx, void *data,
- void *data_end)
-{
- struct bpf_sock_tuple tup;
- struct bpf_sock *sk;
- struct ethhdr *ethh;
- struct iphdr *ipv4h;
- struct ipv6hdr *ipv6h;
- struct tcphdr *tcph;
- int ret;
- __u32 key_mss = 2;
- __u32 key_gen = 1;
- __u32 key = 0;
- __s64 seq_mss;
-
- ethh = data;
- if (ethh + 1 > data_end)
- return;
-
- switch (bpf_ntohs(ethh->h_proto)) {
- case ETH_P_IP:
- ipv4h = data + sizeof(struct ethhdr);
- if (ipv4h + 1 > data_end)
- return;
-
- if (ipv4h->ihl != 5)
- return;
-
- tcph = data + sizeof(struct ethhdr) + sizeof(struct iphdr);
- if (tcph + 1 > data_end)
- return;
-
- tup.ipv4.saddr = ipv4h->saddr;
- tup.ipv4.daddr = ipv4h->daddr;
- tup.ipv4.sport = tcph->source;
- tup.ipv4.dport = tcph->dest;
-
- sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv4),
- BPF_F_CURRENT_NETNS, 0);
- if (!sk)
- return;
-
- if (sk->state != BPF_TCP_LISTEN)
- goto release;
-
- seq_mss = gen_syncookie(data_end, sk, ipv4h, sizeof(*ipv4h),
- tcph);
-
- ret = bpf_tcp_check_syncookie(sk, ipv4h, sizeof(*ipv4h),
- tcph, sizeof(*tcph));
- break;
-
- case ETH_P_IPV6:
- ipv6h = data + sizeof(struct ethhdr);
- if (ipv6h + 1 > data_end)
- return;
-
- if (ipv6h->nexthdr != IPPROTO_TCP)
- return;
-
- tcph = data + sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
- if (tcph + 1 > data_end)
- return;
-
- memcpy(tup.ipv6.saddr, &ipv6h->saddr, sizeof(tup.ipv6.saddr));
- memcpy(tup.ipv6.daddr, &ipv6h->daddr, sizeof(tup.ipv6.daddr));
- tup.ipv6.sport = tcph->source;
- tup.ipv6.dport = tcph->dest;
-
- sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv6),
- BPF_F_CURRENT_NETNS, 0);
- if (!sk)
- return;
-
- if (sk->state != BPF_TCP_LISTEN)
- goto release;
-
- seq_mss = gen_syncookie(data_end, sk, ipv6h, sizeof(*ipv6h),
- tcph);
-
- ret = bpf_tcp_check_syncookie(sk, ipv6h, sizeof(*ipv6h),
- tcph, sizeof(*tcph));
- break;
-
- default:
- return;
- }
-
- if (seq_mss > 0) {
- __u32 cookie = (__u32)seq_mss;
- __u32 mss = seq_mss >> 32;
-
- bpf_map_update_elem(&results, &key_gen, &cookie, 0);
- bpf_map_update_elem(&results, &key_mss, &mss, 0);
- }
-
- if (ret == 0) {
- __u32 cookie = bpf_ntohl(tcph->ack_seq) - 1;
-
- bpf_map_update_elem(&results, &key, &cookie, 0);
- }
-
-release:
- bpf_sk_release(sk);
-}
-
-SEC("clsact/check_syncookie")
-int check_syncookie_clsact(struct __sk_buff *skb)
-{
- check_syncookie(skb, (void *)(long)skb->data,
- (void *)(long)skb->data_end);
- return TC_ACT_OK;
-}
-
-SEC("xdp/check_syncookie")
-int check_syncookie_xdp(struct xdp_md *ctx)
-{
- check_syncookie(ctx, (void *)(long)ctx->data,
- (void *)(long)ctx->data_end);
- return XDP_PASS;
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c
new file mode 100644
index 000000000000..7d5293de1952
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c
@@ -0,0 +1,591 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_tracing_net.h"
+#include "bpf_kfuncs.h"
+#include "test_siphash.h"
+#include "test_tcp_custom_syncookie.h"
+#include "bpf_misc.h"
+
+#define MAX_PACKET_OFF 0xffff
+
+/* Hash is calculated for each client and split into ISN and TS.
+ *
+ * MSB LSB
+ * ISN: | 31 ... 8 | 7 6 | 5 | 4 | 3 2 1 0 |
+ * | Hash_1 | MSS | ECN | SACK | WScale |
+ *
+ * TS: | 31 ... 8 | 7 ... 0 |
+ * | Random | Hash_2 |
+ */
+#define COOKIE_BITS 8
+#define COOKIE_MASK (((__u32)1 << COOKIE_BITS) - 1)
+
+enum {
+ /* 0xf is invalid and thus means that the SYN did not have WScale. */
+ BPF_SYNCOOKIE_WSCALE_MASK = (1 << 4) - 1,
+ BPF_SYNCOOKIE_SACK = (1 << 4),
+ BPF_SYNCOOKIE_ECN = (1 << 5),
+};
+
+#define MSS_LOCAL_IPV4 65495
+#define MSS_LOCAL_IPV6 65476
+
+const __u16 msstab4[] = {
+ 536,
+ 1300,
+ 1460,
+ MSS_LOCAL_IPV4,
+};
+
+const __u16 msstab6[] = {
+ 1280 - 60, /* IPV6_MIN_MTU - 60 */
+ 1480 - 60,
+ 9000 - 60,
+ MSS_LOCAL_IPV6,
+};
+
+static siphash_key_t test_key_siphash = {
+ { 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }
+};
+
+struct tcp_syncookie {
+ struct __sk_buff *skb;
+ void *data;
+ void *data_end;
+ struct ethhdr *eth;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ struct tcphdr *tcp;
+ __be32 *ptr32;
+ struct bpf_tcp_req_attrs attrs;
+ u32 off;
+ u32 cookie;
+ u64 first;
+};
+
+bool handled_syn, handled_ack;
+
+static int tcp_load_headers(struct tcp_syncookie *ctx)
+{
+ ctx->data = (void *)(long)ctx->skb->data;
+ ctx->data_end = (void *)(long)ctx->skb->data_end;
+ ctx->eth = (struct ethhdr *)(long)ctx->skb->data;
+
+ if (ctx->eth + 1 > ctx->data_end)
+ goto err;
+
+ switch (bpf_ntohs(ctx->eth->h_proto)) {
+ case ETH_P_IP:
+ ctx->ipv4 = (struct iphdr *)(ctx->eth + 1);
+
+ if (ctx->ipv4 + 1 > ctx->data_end)
+ goto err;
+
+ if (ctx->ipv4->ihl != sizeof(*ctx->ipv4) / 4)
+ goto err;
+
+ if (ctx->ipv4->version != 4)
+ goto err;
+
+ if (ctx->ipv4->protocol != IPPROTO_TCP)
+ goto err;
+
+ ctx->tcp = (struct tcphdr *)(ctx->ipv4 + 1);
+ break;
+ case ETH_P_IPV6:
+ ctx->ipv6 = (struct ipv6hdr *)(ctx->eth + 1);
+
+ if (ctx->ipv6 + 1 > ctx->data_end)
+ goto err;
+
+ if (ctx->ipv6->version != 6)
+ goto err;
+
+ if (ctx->ipv6->nexthdr != NEXTHDR_TCP)
+ goto err;
+
+ ctx->tcp = (struct tcphdr *)(ctx->ipv6 + 1);
+ break;
+ default:
+ goto err;
+ }
+
+ if (ctx->tcp + 1 > ctx->data_end)
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static int tcp_reload_headers(struct tcp_syncookie *ctx)
+{
+ /* Without volatile, the verifier rejects this with
+ * "R3 32-bit pointer arithmetic prohibited".
+ */
+ volatile u64 data_len = ctx->skb->data_end - ctx->skb->data;
+
+ if (ctx->tcp->doff < sizeof(*ctx->tcp) / 4)
+ goto err;
+
+ /* Needed to calculate csum and parse TCP options. */
+ if (bpf_skb_change_tail(ctx->skb, data_len + 60 - ctx->tcp->doff * 4, 0))
+ goto err;
+
+ ctx->data = (void *)(long)ctx->skb->data;
+ ctx->data_end = (void *)(long)ctx->skb->data_end;
+ ctx->eth = (struct ethhdr *)(long)ctx->skb->data;
+ if (ctx->ipv4) {
+ ctx->ipv4 = (struct iphdr *)(ctx->eth + 1);
+ ctx->ipv6 = NULL;
+ ctx->tcp = (struct tcphdr *)(ctx->ipv4 + 1);
+ } else {
+ ctx->ipv4 = NULL;
+ ctx->ipv6 = (struct ipv6hdr *)(ctx->eth + 1);
+ ctx->tcp = (struct tcphdr *)(ctx->ipv6 + 1);
+ }
+
+ if ((void *)ctx->tcp + 60 > ctx->data_end)
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static __sum16 tcp_v4_csum(struct tcp_syncookie *ctx, __wsum csum)
+{
+ return csum_tcpudp_magic(ctx->ipv4->saddr, ctx->ipv4->daddr,
+ ctx->tcp->doff * 4, IPPROTO_TCP, csum);
+}
+
+static __sum16 tcp_v6_csum(struct tcp_syncookie *ctx, __wsum csum)
+{
+ return csum_ipv6_magic(&ctx->ipv6->saddr, &ctx->ipv6->daddr,
+ ctx->tcp->doff * 4, IPPROTO_TCP, csum);
+}
+
+static int tcp_validate_header(struct tcp_syncookie *ctx)
+{
+ s64 csum;
+
+ if (tcp_reload_headers(ctx))
+ goto err;
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->tcp, ctx->tcp->doff * 4, 0);
+ if (csum < 0)
+ goto err;
+
+ if (ctx->ipv4) {
+ /* check tcp_v4_csum(csum) is 0 if not on lo. */
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->ipv4, ctx->ipv4->ihl * 4, 0);
+ if (csum < 0)
+ goto err;
+
+ if (csum_fold(csum) != 0)
+ goto err;
+ } else if (ctx->ipv6) {
+ /* check tcp_v6_csum(csum) is 0 if not on lo. */
+ }
+
+ return 0;
+err:
+ return -1;
+}
+
+static __always_inline void *next(struct tcp_syncookie *ctx, __u32 sz)
+{
+ __u64 off = ctx->off;
+ __u8 *data;
+
+ /* Verifier forbids access to packet when offset exceeds MAX_PACKET_OFF */
+ if (off > MAX_PACKET_OFF - sz)
+ return NULL;
+
+ data = ctx->data + off;
+ barrier_var(data);
+ if (data + sz >= ctx->data_end)
+ return NULL;
+
+ ctx->off += sz;
+ return data;
+}
+
+static int tcp_parse_option(__u32 index, struct tcp_syncookie *ctx)
+{
+ __u8 *opcode, *opsize, *wscale;
+ __u32 *tsval, *tsecr;
+ __u16 *mss;
+ __u32 off;
+
+ off = ctx->off;
+ opcode = next(ctx, 1);
+ if (!opcode)
+ goto stop;
+
+ if (*opcode == TCPOPT_EOL)
+ goto stop;
+
+ if (*opcode == TCPOPT_NOP)
+ goto next;
+
+ opsize = next(ctx, 1);
+ if (!opsize)
+ goto stop;
+
+ if (*opsize < 2)
+ goto stop;
+
+ switch (*opcode) {
+ case TCPOPT_MSS:
+ mss = next(ctx, 2);
+ if (*opsize == TCPOLEN_MSS && ctx->tcp->syn && mss)
+ ctx->attrs.mss = get_unaligned_be16(mss);
+ break;
+ case TCPOPT_WINDOW:
+ wscale = next(ctx, 1);
+ if (*opsize == TCPOLEN_WINDOW && ctx->tcp->syn && wscale) {
+ ctx->attrs.wscale_ok = 1;
+ ctx->attrs.snd_wscale = *wscale;
+ }
+ break;
+ case TCPOPT_TIMESTAMP:
+ tsval = next(ctx, 4);
+ tsecr = next(ctx, 4);
+ if (*opsize == TCPOLEN_TIMESTAMP && tsval && tsecr) {
+ ctx->attrs.rcv_tsval = get_unaligned_be32(tsval);
+ ctx->attrs.rcv_tsecr = get_unaligned_be32(tsecr);
+
+ if (ctx->tcp->syn && ctx->attrs.rcv_tsecr)
+ ctx->attrs.tstamp_ok = 0;
+ else
+ ctx->attrs.tstamp_ok = 1;
+ }
+ break;
+ case TCPOPT_SACK_PERM:
+ if (*opsize == TCPOLEN_SACK_PERM && ctx->tcp->syn)
+ ctx->attrs.sack_ok = 1;
+ break;
+ }
+
+ ctx->off = off + *opsize;
+next:
+ return 0;
+stop:
+ return 1;
+}
+
+static void tcp_parse_options(struct tcp_syncookie *ctx)
+{
+ ctx->off = (__u8 *)(ctx->tcp + 1) - (__u8 *)ctx->data;
+
+ bpf_loop(40, tcp_parse_option, ctx, 0);
+}
+
+static int tcp_validate_sysctl(struct tcp_syncookie *ctx)
+{
+ if ((ctx->ipv4 && ctx->attrs.mss != MSS_LOCAL_IPV4) ||
+ (ctx->ipv6 && ctx->attrs.mss != MSS_LOCAL_IPV6))
+ goto err;
+
+ if (!ctx->attrs.wscale_ok ||
+ !ctx->attrs.snd_wscale ||
+ ctx->attrs.snd_wscale >= BPF_SYNCOOKIE_WSCALE_MASK)
+ goto err;
+
+ if (!ctx->attrs.tstamp_ok)
+ goto err;
+
+ if (!ctx->attrs.sack_ok)
+ goto err;
+
+ if (!ctx->tcp->ece || !ctx->tcp->cwr)
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static void tcp_prepare_cookie(struct tcp_syncookie *ctx)
+{
+ u32 seq = bpf_ntohl(ctx->tcp->seq);
+ u64 first = 0, second;
+ int mssind = 0;
+ u32 hash;
+
+ if (ctx->ipv4) {
+ for (mssind = ARRAY_SIZE(msstab4) - 1; mssind; mssind--)
+ if (ctx->attrs.mss >= msstab4[mssind])
+ break;
+
+ ctx->attrs.mss = msstab4[mssind];
+
+ first = (u64)ctx->ipv4->saddr << 32 | ctx->ipv4->daddr;
+ } else if (ctx->ipv6) {
+ for (mssind = ARRAY_SIZE(msstab6) - 1; mssind; mssind--)
+ if (ctx->attrs.mss >= msstab6[mssind])
+ break;
+
+ ctx->attrs.mss = msstab6[mssind];
+
+ first = (u64)ctx->ipv6->saddr.in6_u.u6_addr8[0] << 32 |
+ ctx->ipv6->daddr.in6_u.u6_addr32[0];
+ }
+
+ second = (u64)seq << 32 | ctx->tcp->source << 16 | ctx->tcp->dest;
+ hash = siphash_2u64(first, second, &test_key_siphash);
+
+ if (ctx->attrs.tstamp_ok) {
+ ctx->attrs.rcv_tsecr = bpf_get_prandom_u32();
+ ctx->attrs.rcv_tsecr &= ~COOKIE_MASK;
+ ctx->attrs.rcv_tsecr |= hash & COOKIE_MASK;
+ }
+
+ hash &= ~COOKIE_MASK;
+ hash |= mssind << 6;
+
+ if (ctx->attrs.wscale_ok)
+ hash |= ctx->attrs.snd_wscale & BPF_SYNCOOKIE_WSCALE_MASK;
+
+ if (ctx->attrs.sack_ok)
+ hash |= BPF_SYNCOOKIE_SACK;
+
+ if (ctx->attrs.tstamp_ok && ctx->tcp->ece && ctx->tcp->cwr)
+ hash |= BPF_SYNCOOKIE_ECN;
+
+ ctx->cookie = hash;
+}
+
+static void tcp_write_options(struct tcp_syncookie *ctx)
+{
+ ctx->ptr32 = (__be32 *)(ctx->tcp + 1);
+
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_MSS << 24 | TCPOLEN_MSS << 16 |
+ ctx->attrs.mss);
+
+ if (ctx->attrs.wscale_ok)
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 |
+ TCPOPT_WINDOW << 16 |
+ TCPOLEN_WINDOW << 8 |
+ ctx->attrs.snd_wscale);
+
+ if (ctx->attrs.tstamp_ok) {
+ if (ctx->attrs.sack_ok)
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_SACK_PERM << 24 |
+ TCPOLEN_SACK_PERM << 16 |
+ TCPOPT_TIMESTAMP << 8 |
+ TCPOLEN_TIMESTAMP);
+ else
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 |
+ TCPOPT_NOP << 16 |
+ TCPOPT_TIMESTAMP << 8 |
+ TCPOLEN_TIMESTAMP);
+
+ *ctx->ptr32++ = bpf_htonl(ctx->attrs.rcv_tsecr);
+ *ctx->ptr32++ = bpf_htonl(ctx->attrs.rcv_tsval);
+ } else if (ctx->attrs.sack_ok) {
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 |
+ TCPOPT_NOP << 16 |
+ TCPOPT_SACK_PERM << 8 |
+ TCPOLEN_SACK_PERM);
+ }
+}
+
+static int tcp_handle_syn(struct tcp_syncookie *ctx)
+{
+ s64 csum;
+
+ if (tcp_validate_header(ctx))
+ goto err;
+
+ tcp_parse_options(ctx);
+
+ if (tcp_validate_sysctl(ctx))
+ goto err;
+
+ tcp_prepare_cookie(ctx);
+ tcp_write_options(ctx);
+
+ swap(ctx->tcp->source, ctx->tcp->dest);
+ ctx->tcp->check = 0;
+ ctx->tcp->ack_seq = bpf_htonl(bpf_ntohl(ctx->tcp->seq) + 1);
+ ctx->tcp->seq = bpf_htonl(ctx->cookie);
+ ctx->tcp->doff = ((long)ctx->ptr32 - (long)ctx->tcp) >> 2;
+ ctx->tcp->ack = 1;
+ if (!ctx->attrs.tstamp_ok || !ctx->tcp->ece || !ctx->tcp->cwr)
+ ctx->tcp->ece = 0;
+ ctx->tcp->cwr = 0;
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->tcp, ctx->tcp->doff * 4, 0);
+ if (csum < 0)
+ goto err;
+
+ if (ctx->ipv4) {
+ swap(ctx->ipv4->saddr, ctx->ipv4->daddr);
+ ctx->tcp->check = tcp_v4_csum(ctx, csum);
+
+ ctx->ipv4->check = 0;
+ ctx->ipv4->tos = 0;
+ ctx->ipv4->tot_len = bpf_htons((long)ctx->ptr32 - (long)ctx->ipv4);
+ ctx->ipv4->id = 0;
+ ctx->ipv4->ttl = 64;
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->ipv4, sizeof(*ctx->ipv4), 0);
+ if (csum < 0)
+ goto err;
+
+ ctx->ipv4->check = csum_fold(csum);
+ } else if (ctx->ipv6) {
+ swap(ctx->ipv6->saddr, ctx->ipv6->daddr);
+ ctx->tcp->check = tcp_v6_csum(ctx, csum);
+
+ *(__be32 *)ctx->ipv6 = bpf_htonl(0x60000000);
+ ctx->ipv6->payload_len = bpf_htons((long)ctx->ptr32 - (long)ctx->tcp);
+ ctx->ipv6->hop_limit = 64;
+ }
+
+ swap_array(ctx->eth->h_source, ctx->eth->h_dest);
+
+ if (bpf_skb_change_tail(ctx->skb, (long)ctx->ptr32 - (long)ctx->eth, 0))
+ goto err;
+
+ return bpf_redirect(ctx->skb->ifindex, 0);
+err:
+ return TC_ACT_SHOT;
+}
+
+static int tcp_validate_cookie(struct tcp_syncookie *ctx)
+{
+ u32 cookie = bpf_ntohl(ctx->tcp->ack_seq) - 1;
+ u32 seq = bpf_ntohl(ctx->tcp->seq) - 1;
+ u64 first = 0, second;
+ int mssind;
+ u32 hash;
+
+ if (ctx->ipv4)
+ first = (u64)ctx->ipv4->saddr << 32 | ctx->ipv4->daddr;
+ else if (ctx->ipv6)
+ first = (u64)ctx->ipv6->saddr.in6_u.u6_addr8[0] << 32 |
+ ctx->ipv6->daddr.in6_u.u6_addr32[0];
+
+ second = (u64)seq << 32 | ctx->tcp->source << 16 | ctx->tcp->dest;
+ hash = siphash_2u64(first, second, &test_key_siphash);
+
+ if (ctx->attrs.tstamp_ok)
+ hash -= ctx->attrs.rcv_tsecr & COOKIE_MASK;
+ else
+ hash &= ~COOKIE_MASK;
+
+ hash -= cookie & ~COOKIE_MASK;
+ if (hash)
+ goto err;
+
+ mssind = (cookie & (3 << 6)) >> 6;
+ if (ctx->ipv4)
+ ctx->attrs.mss = msstab4[mssind];
+ else
+ ctx->attrs.mss = msstab6[mssind];
+
+ ctx->attrs.snd_wscale = cookie & BPF_SYNCOOKIE_WSCALE_MASK;
+ ctx->attrs.rcv_wscale = ctx->attrs.snd_wscale;
+ ctx->attrs.wscale_ok = ctx->attrs.snd_wscale == BPF_SYNCOOKIE_WSCALE_MASK;
+ ctx->attrs.sack_ok = cookie & BPF_SYNCOOKIE_SACK;
+ ctx->attrs.ecn_ok = cookie & BPF_SYNCOOKIE_ECN;
+
+ return 0;
+err:
+ return -1;
+}
+
+static int tcp_handle_ack(struct tcp_syncookie *ctx)
+{
+ struct bpf_sock_tuple tuple;
+ struct bpf_sock *skc;
+ int ret = TC_ACT_OK;
+ struct sock *sk;
+ u32 tuple_size;
+
+ if (ctx->ipv4) {
+ tuple.ipv4.saddr = ctx->ipv4->saddr;
+ tuple.ipv4.daddr = ctx->ipv4->daddr;
+ tuple.ipv4.sport = ctx->tcp->source;
+ tuple.ipv4.dport = ctx->tcp->dest;
+ tuple_size = sizeof(tuple.ipv4);
+ } else if (ctx->ipv6) {
+ __builtin_memcpy(tuple.ipv6.saddr, &ctx->ipv6->saddr, sizeof(tuple.ipv6.saddr));
+ __builtin_memcpy(tuple.ipv6.daddr, &ctx->ipv6->daddr, sizeof(tuple.ipv6.daddr));
+ tuple.ipv6.sport = ctx->tcp->source;
+ tuple.ipv6.dport = ctx->tcp->dest;
+ tuple_size = sizeof(tuple.ipv6);
+ } else {
+ goto out;
+ }
+
+ skc = bpf_skc_lookup_tcp(ctx->skb, &tuple, tuple_size, -1, 0);
+ if (!skc)
+ goto out;
+
+ if (skc->state != TCP_LISTEN)
+ goto release;
+
+ sk = (struct sock *)bpf_skc_to_tcp_sock(skc);
+ if (!sk)
+ goto err;
+
+ if (tcp_validate_header(ctx))
+ goto err;
+
+ tcp_parse_options(ctx);
+
+ if (tcp_validate_cookie(ctx))
+ goto err;
+
+ ret = bpf_sk_assign_tcp_reqsk(ctx->skb, sk, &ctx->attrs, sizeof(ctx->attrs));
+ if (ret < 0)
+ goto err;
+
+release:
+ bpf_sk_release(skc);
+out:
+ return ret;
+
+err:
+ ret = TC_ACT_SHOT;
+ goto release;
+}
+
+SEC("tc")
+int tcp_custom_syncookie(struct __sk_buff *skb)
+{
+ struct tcp_syncookie ctx = {
+ .skb = skb,
+ };
+
+ if (tcp_load_headers(&ctx))
+ return TC_ACT_OK;
+
+ if (ctx.tcp->rst)
+ return TC_ACT_OK;
+
+ if (ctx.tcp->syn) {
+ if (ctx.tcp->ack)
+ return TC_ACT_OK;
+
+ handled_syn = true;
+
+ return tcp_handle_syn(&ctx);
+ }
+
+ handled_ack = true;
+
+ return tcp_handle_ack(&ctx);
+}
+
+char _license[] SEC("license") = "GPL";
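Following the layout described at the top of the file, the low byte of the ISN carries the MSS table index, the window scale, and the SACK/ECN flags, while the remaining bits hold Hash_1. An illustrative pair of pack/unpack helpers consistent with tcp_prepare_cookie() and tcp_validate_cookie(); these helpers are not part of the patch:

/* Illustration of the cookie bit layout; mirrors the logic above. */
static __u32 cookie_pack(__u32 hash, int mssind, __u8 wscale,
			 bool sack_ok, bool ecn_ok)
{
	__u32 cookie = hash & ~COOKIE_MASK;		/* Hash_1 in bits 31..8 */

	cookie |= mssind << 6;				/* 2-bit MSS table index */
	cookie |= wscale & BPF_SYNCOOKIE_WSCALE_MASK;	/* 4-bit window scale */
	if (sack_ok)
		cookie |= BPF_SYNCOOKIE_SACK;
	if (ecn_ok)
		cookie |= BPF_SYNCOOKIE_ECN;

	return cookie;
}

static void cookie_unpack(__u32 cookie, bool v4, struct bpf_tcp_req_attrs *attrs)
{
	int mssind = (cookie & (3 << 6)) >> 6;

	attrs->mss = v4 ? msstab4[mssind] : msstab6[mssind];
	attrs->snd_wscale = cookie & BPF_SYNCOOKIE_WSCALE_MASK;
	attrs->sack_ok = !!(cookie & BPF_SYNCOOKIE_SACK);
	attrs->ecn_ok = !!(cookie & BPF_SYNCOOKIE_ECN);
}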
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h
new file mode 100644
index 000000000000..34024de6337e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#ifndef _TEST_TCP_SYNCOOKIE_H
+#define _TEST_TCP_SYNCOOKIE_H
+
+#define __packed __attribute__((__packed__))
+#define __force
+
+#define swap(a, b) \
+ do { \
+ typeof(a) __tmp = (a); \
+ (a) = (b); \
+ (b) = __tmp; \
+ } while (0)
+
+#define swap_array(a, b) \
+ do { \
+ typeof(a) __tmp[sizeof(a)]; \
+ __builtin_memcpy(__tmp, a, sizeof(a)); \
+ __builtin_memcpy(a, b, sizeof(a)); \
+ __builtin_memcpy(b, __tmp, sizeof(a)); \
+ } while (0)
+
+/* linux/unaligned.h */
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed * __pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return bpf_ntohs(__get_unaligned_t(__be16, p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return bpf_ntohl(__get_unaligned_t(__be32, p));
+}
+
+/* lib/checksum.c */
+static inline u32 from64to32(u64 x)
+{
+ /* add up 32-bit and 32-bit for 32+c bit */
+ x = (x & 0xffffffff) + (x >> 32);
+ /* add up carry.. */
+ x = (x & 0xffffffff) + (x >> 32);
+ return (u32)x;
+}
+
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto, __wsum sum)
+{
+ unsigned long long s = (__force u32)sum;
+
+ s += (__force u32)saddr;
+ s += (__force u32)daddr;
+#ifdef __BIG_ENDIAN
+ s += proto + len;
+#else
+ s += (proto + len) << 8;
+#endif
+ return (__force __wsum)from64to32(s);
+}
+
+/* asm-generic/checksum.h */
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+
+ sum = (sum & 0xffff) + (sum >> 16);
+ sum = (sum & 0xffff) + (sum >> 16);
+ return (__force __sum16)~sum;
+}
+
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+/* net/ipv6/ip6_checksum.c */
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto, __wsum csum)
+{
+ int carry;
+ __u32 ulen;
+ __u32 uproto;
+ __u32 sum = (__force u32)csum;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[0];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[0]);
+ sum += carry;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[1];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[1]);
+ sum += carry;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[2];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[2]);
+ sum += carry;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[3];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[3]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[0];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[0]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[1];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[1]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[2];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[2]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[3];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[3]);
+ sum += carry;
+
+ ulen = (__force u32)bpf_htonl((__u32)len);
+ sum += ulen;
+ carry = (sum < ulen);
+ sum += carry;
+
+ uproto = (__force u32)bpf_htonl(proto);
+ sum += uproto;
+ carry = (sum < uproto);
+ sum += carry;
+
+ return csum_fold((__force __wsum)sum);
+}
+#endif
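As a worked example of the folding arithmetic, csum_fold() reduces the 32-bit partial sum 0x1a2b3c4d as 0x3c4d + 0x1a2b = 0x5678, leaves no carry on the second pass, and returns the one's complement 0xa987. A tiny self-check, assuming it is compiled alongside the header above (types such as __wsum and __sum16 come from the including program):

/* Sanity check of the fold described above; not part of the patch. */
static inline int csum_fold_selfcheck(void)
{
	__sum16 folded = csum_fold((__force __wsum)0x1a2b3c4d);

	return folded == (__force __sum16)0xa987 ? 0 : -1;
}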
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
index adc83a54c352..e2ae049c2f85 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_estats.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
@@ -244,7 +244,7 @@ static __always_inline void send_basic_event(struct sock *sk,
bpf_map_update_elem(&ev_record_map, &key, &ev, BPF_ANY);
}
-SEC("dummy_tracepoint")
+SEC("tp/dummy/tracepoint")
int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
{
if (!arg->sock)
@@ -255,4 +255,3 @@ int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
index 678bd0fad29e..5f4e87ee949a 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
@@ -594,7 +594,7 @@ static int handle_parse_hdr(struct bpf_sock_ops *skops)
return CG_OK;
}
-SEC("sockops/estab")
+SEC("sockops")
int estab(struct bpf_sock_ops *skops)
{
int true_val = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
index 3e6912e4df3d..6935f32eeb8f 100644
--- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
@@ -1,63 +1,40 @@
// SPDX-License-Identifier: GPL-2.0
-#include <stddef.h>
-#include <string.h>
-#include <netinet/in.h>
-#include <linux/bpf.h>
-#include <linux/if_ether.h>
-#include <linux/if_packet.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/types.h>
-#include <linux/socket.h>
-#include <linux/tcp.h>
+#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "test_tcpbpf.h"
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 4);
- __type(key, __u32);
- __type(value, struct tcpbpf_globals);
-} global_map SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 2);
- __type(key, __u32);
- __type(value, int);
-} sockopt_results SEC(".maps");
-
-static inline void update_event_map(int event)
+struct tcpbpf_globals global = {};
+
+/* SOL_TCP is defined in <netinet/tcp.h>, while
+ * TCP_SAVED_SYN is defined in the already included <linux/tcp.h>.
+ */
+#ifndef SOL_TCP
+#define SOL_TCP 6
+#endif
+
+static __always_inline int get_tp_window_clamp(struct bpf_sock_ops *skops)
{
- __u32 key = 0;
- struct tcpbpf_globals g, *gp;
-
- gp = bpf_map_lookup_elem(&global_map, &key);
- if (gp == NULL) {
- struct tcpbpf_globals g = {0};
-
- g.event_map |= (1 << event);
- bpf_map_update_elem(&global_map, &key, &g,
- BPF_ANY);
- } else {
- g = *gp;
- g.event_map |= (1 << event);
- bpf_map_update_elem(&global_map, &key, &g,
- BPF_ANY);
- }
+ struct bpf_sock *sk;
+ struct tcp_sock *tp;
+
+ sk = skops->sk;
+ if (!sk)
+ return -1;
+ tp = bpf_skc_to_tcp_sock(sk);
+ if (!tp)
+ return -1;
+ return tp->window_clamp;
}
-int _version SEC("version") = 1;
-
SEC("sockops")
int bpf_testcb(struct bpf_sock_ops *skops)
{
char header[sizeof(struct ipv6hdr) + sizeof(struct tcphdr)];
struct bpf_sock_ops *reuse = skops;
struct tcphdr *thdr;
- int good_call_rv = 0;
- int bad_call_rv = 0;
+ int window_clamp = 9216;
int save_syn = 1;
int rv = -1;
int v = 0;
@@ -71,7 +48,7 @@ int bpf_testcb(struct bpf_sock_ops *skops)
asm volatile (
"%[op] = *(u32 *)(%[skops] +96)"
- : [op] "+r"(op)
+ : [op] "=r"(op)
: [skops] "r"(skops)
:);
@@ -105,29 +82,20 @@ int bpf_testcb(struct bpf_sock_ops *skops)
op = (int) skops->op;
- update_event_map(op);
+ global.event_map |= (1 << op);
switch (op) {
+ case BPF_SOCK_OPS_TCP_CONNECT_CB:
+ rv = bpf_setsockopt(skops, SOL_TCP, TCP_WINDOW_CLAMP,
+ &window_clamp, sizeof(window_clamp));
+ global.window_clamp_client = get_tp_window_clamp(skops);
+ break;
case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
/* Test failure to set largest cb flag (assumes not defined) */
- bad_call_rv = bpf_sock_ops_cb_flags_set(skops, 0x80);
+ global.bad_cb_test_rv = bpf_sock_ops_cb_flags_set(skops, 0x80);
/* Set callback */
- good_call_rv = bpf_sock_ops_cb_flags_set(skops,
+ global.good_cb_test_rv = bpf_sock_ops_cb_flags_set(skops,
BPF_SOCK_OPS_STATE_CB_FLAG);
- /* Update results */
- {
- __u32 key = 0;
- struct tcpbpf_globals g, *gp;
-
- gp = bpf_map_lookup_elem(&global_map, &key);
- if (!gp)
- break;
- g = *gp;
- g.bad_cb_test_rv = bad_call_rv;
- g.good_cb_test_rv = good_call_rv;
- bpf_map_update_elem(&global_map, &key, &g,
- BPF_ANY);
- }
break;
case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
skops->sk_txhash = 0x12345f;
@@ -143,12 +111,14 @@ int bpf_testcb(struct bpf_sock_ops *skops)
thdr = (struct tcphdr *)(header + offset);
v = thdr->syn;
- __u32 key = 1;
- bpf_map_update_elem(&sockopt_results, &key, &v,
- BPF_ANY);
+ global.tcp_saved_syn = v;
}
}
+ rv = bpf_setsockopt(skops, SOL_TCP, TCP_WINDOW_CLAMP,
+ &window_clamp, sizeof(window_clamp));
+
+ global.window_clamp_server = get_tp_window_clamp(skops);
break;
case BPF_SOCK_OPS_RTO_CB:
break;
@@ -156,25 +126,16 @@ int bpf_testcb(struct bpf_sock_ops *skops)
break;
case BPF_SOCK_OPS_STATE_CB:
if (skops->args[1] == BPF_TCP_CLOSE) {
- __u32 key = 0;
- struct tcpbpf_globals g, *gp;
-
- gp = bpf_map_lookup_elem(&global_map, &key);
- if (!gp)
- break;
- g = *gp;
if (skops->args[0] == BPF_TCP_LISTEN) {
- g.num_listen++;
+ global.num_listen++;
} else {
- g.total_retrans = skops->total_retrans;
- g.data_segs_in = skops->data_segs_in;
- g.data_segs_out = skops->data_segs_out;
- g.bytes_received = skops->bytes_received;
- g.bytes_acked = skops->bytes_acked;
+ global.total_retrans = skops->total_retrans;
+ global.data_segs_in = skops->data_segs_in;
+ global.data_segs_out = skops->data_segs_out;
+ global.bytes_received = skops->bytes_received;
+ global.bytes_acked = skops->bytes_acked;
}
- g.num_close_events++;
- bpf_map_update_elem(&global_map, &key, &g,
- BPF_ANY);
+ global.num_close_events++;
}
break;
case BPF_SOCK_OPS_TCP_LISTEN_CB:
@@ -182,9 +143,7 @@ int bpf_testcb(struct bpf_sock_ops *skops)
v = bpf_setsockopt(skops, IPPROTO_TCP, TCP_SAVE_SYN,
&save_syn, sizeof(save_syn));
/* Update global map w/ result of setsock opt */
- __u32 key = 0;
-
- bpf_map_update_elem(&sockopt_results, &key, &v, BPF_ANY);
+ global.tcp_save_syn = v;
break;
default:
rv = -1;
diff --git a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
index ac63410bb541..540181c115a8 100644
--- a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
@@ -24,12 +24,10 @@ struct {
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(max_entries, 2);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(__u32));
+ __type(key, int);
+ __type(value, __u32);
} perf_event_map SEC(".maps");
-int _version SEC("version") = 1;
-
SEC("sockops")
int bpf_testcb(struct bpf_sock_ops *skops)
{
diff --git a/tools/testing/selftests/bpf/progs/test_time_tai.c b/tools/testing/selftests/bpf/progs/test_time_tai.c
new file mode 100644
index 000000000000..7ea0863f3ddb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_time_tai.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Linutronix GmbH */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("tc")
+int time_tai(struct __sk_buff *skb)
+{
+ __u64 ts1, ts2;
+
+ /* Get TAI timestamps */
+ ts1 = bpf_ktime_get_tai_ns();
+ ts2 = bpf_ktime_get_tai_ns();
+
+ /* Save TAI timestamps (Note: skb->hwtstamp is read-only) */
+ skb->tstamp = ts1;
+ skb->cb[0] = ts2 & 0xffffffff;
+ skb->cb[1] = ts2 >> 32;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
new file mode 100644
index 000000000000..cf0547a613ff
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
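+/* Dereferencing the nullable argument without a NULL check must be rejected
+ * by the verifier (see the __failure annotation below); the NULL-checked
+ * variant is expected to load fine.
+ */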
+SEC("tp_btf/bpf_testmod_test_nullable_bare_tp")
+__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'")
+int BPF_PROG(handle_tp_btf_nullable_bare1, struct bpf_testmod_test_read_ctx *nullable_ctx)
+{
+ return nullable_ctx->len;
+}
+
+SEC("tp_btf/bpf_testmod_test_nullable_bare_tp")
+int BPF_PROG(handle_tp_btf_nullable_bare2, struct bpf_testmod_test_read_ctx *nullable_ctx)
+{
+ if (nullable_ctx)
+ return nullable_ctx->len;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tracepoint.c b/tools/testing/selftests/bpf/progs/test_tracepoint.c
index 4b825ee122cf..4cb8bbb6a320 100644
--- a/tools/testing/selftests/bpf/progs/test_tracepoint.c
+++ b/tools/testing/selftests/bpf/progs/test_tracepoint.c
@@ -1,17 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
-#include <linux/bpf.h>
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+/* taken from /sys/kernel/tracing/events/sched/sched_switch/format */
struct sched_switch_args {
unsigned long long pad;
- char prev_comm[16];
+ char prev_comm[TASK_COMM_LEN];
int prev_pid;
int prev_prio;
long long prev_state;
- char next_comm[16];
+ char next_comm[TASK_COMM_LEN];
int next_pid;
int next_prio;
};
@@ -23,4 +23,3 @@ int oncpu(struct sched_switch_args *ctx)
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/progs/test_trampoline_count.c b/tools/testing/selftests/bpf/progs/test_trampoline_count.c
index f030e469d05b..7765720da7d5 100644
--- a/tools/testing/selftests/bpf/progs/test_trampoline_count.c
+++ b/tools/testing/selftests/bpf/progs/test_trampoline_count.c
@@ -1,20 +1,22 @@
// SPDX-License-Identifier: GPL-2.0
-#include <stdbool.h>
-#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-struct task_struct;
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(fentry_test, int a, int *b)
+{
+ return 0;
+}
-SEC("fentry/__set_task_comm")
-int BPF_PROG(prog1, struct task_struct *tsk, const char *buf, bool exec)
+SEC("fmod_ret/bpf_modify_return_test")
+int BPF_PROG(fmod_ret_test, int a, int *b, int ret)
{
return 0;
}
-SEC("fexit/__set_task_comm")
-int BPF_PROG(prog2, struct task_struct *tsk, const char *buf, bool exec)
+SEC("fexit/bpf_modify_return_test")
+int BPF_PROG(fexit_test, int a, int *b, int ret)
{
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
index f48dbfe24ddc..32127f1cd687 100644
--- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
@@ -6,45 +6,56 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
-#include <stddef.h>
-#include <string.h>
-#include <arpa/inet.h>
-#include <linux/bpf.h>
-#include <linux/if_ether.h>
-#include <linux/if_packet.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/types.h>
-#include <linux/tcp.h>
-#include <linux/socket.h>
-#include <linux/pkt_cls.h>
-#include <linux/erspan.h>
+#include "vmlinux.h"
+#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_kfuncs.h"
+#include "bpf_tracing_net.h"
-#define ERROR(ret) do {\
- char fmt[] = "ERROR line:%d ret:%d\n";\
- bpf_trace_printk(fmt, sizeof(fmt), __LINE__, ret); \
- } while (0)
-
-int _version SEC("version") = 1;
-
-struct geneve_opt {
- __be16 opt_class;
- __u8 type;
- __u8 length:5;
- __u8 r3:1;
- __u8 r2:1;
- __u8 r1:1;
- __u8 opt_data[8]; /* hard-coded to 8 byte */
-};
+#define log_err(__ret) bpf_printk("ERROR line:%d ret:%d\n", __LINE__, __ret)
+
+#define VXLAN_UDP_PORT 4789
+#define ETH_P_IP 0x0800
+#define PACKET_HOST 0
+#define TUNNEL_CSUM bpf_htons(0x01)
+#define TUNNEL_KEY bpf_htons(0x04)
+
+/* The only IPv4 address assigned to veth1:
+ * 172.16.1.200
+ */
+#define ASSIGNED_ADDR_VETH1 0xac1001c8
-struct vxlan_metadata {
- __u32 gbp;
+struct bpf_fou_encap___local {
+ __be16 sport;
+ __be16 dport;
+} __attribute__((preserve_access_index));
+
+enum bpf_fou_encap_type___local {
+ FOU_BPF_ENCAP_FOU___local,
+ FOU_BPF_ENCAP_GUE___local,
};
-SEC("gre_set_tunnel")
-int _gre_set_tunnel(struct __sk_buff *skb)
+struct bpf_fou_encap;
+
+int bpf_skb_set_fou_encap(struct __sk_buff *skb_ctx,
+ struct bpf_fou_encap *encap, int type) __ksym;
+int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx,
+ struct bpf_fou_encap *encap) __ksym;
+struct xfrm_state *
+bpf_xdp_get_xfrm_state(struct xdp_md *ctx, struct bpf_xfrm_state_opts *opts,
+ u32 opts__sz) __ksym;
+void bpf_xdp_xfrm_state_release(struct xfrm_state *x) __ksym;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} local_ip_map SEC(".maps");
+
+SEC("tc")
+int gre_set_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
@@ -58,32 +69,52 @@ int _gre_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX | BPF_F_SEQ_NUMBER);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("gre_get_tunnel")
-int _gre_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int gre_set_tunnel_no_key(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ key.tunnel_ttl = 64;
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
+ BPF_F_ZERO_CSUM_TX | BPF_F_SEQ_NUMBER |
+ BPF_F_NO_TUNNEL_KEY);
+ if (ret < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("tc")
+int gre_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
- char fmt[] = "key %d remote ip 0x%x\n";
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt), key.tunnel_id, key.remote_ipv4);
+ bpf_printk("key %d remote ip 0x%x\n", key.tunnel_id, key.remote_ipv4);
return TC_ACT_OK;
}
-SEC("ip6gretap_set_tunnel")
-int _ip6gretap_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6gretap_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
int ret;
@@ -99,35 +130,34 @@ int _ip6gretap_set_tunnel(struct __sk_buff *skb)
BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
BPF_F_SEQ_NUMBER);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("ip6gretap_get_tunnel")
-int _ip6gretap_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6gretap_get_tunnel(struct __sk_buff *skb)
{
- char fmt[] = "key %d remote ip6 ::%x label %x\n";
struct bpf_tunnel_key key;
int ret;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv6[3], key.tunnel_label);
+ bpf_printk("key %d remote ip6 ::%x label %x\n",
+ key.tunnel_id, key.remote_ipv6[3], key.tunnel_label);
return TC_ACT_OK;
}
-SEC("erspan_set_tunnel")
-int _erspan_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int erspan_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
struct erspan_metadata md;
@@ -142,7 +172,7 @@ int _erspan_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
@@ -155,63 +185,58 @@ int _erspan_set_tunnel(struct __sk_buff *skb)
__u8 hwid = 7;
md.version = 2;
- md.u.md2.dir = direction;
- md.u.md2.hwid = hwid & 0xf;
- md.u.md2.hwid_upper = (hwid >> 4) & 0x3;
+ BPF_CORE_WRITE_BITFIELD(&md.u.md2, dir, direction);
+ BPF_CORE_WRITE_BITFIELD(&md.u.md2, hwid, (hwid & 0xf));
+ BPF_CORE_WRITE_BITFIELD(&md.u.md2, hwid_upper, (hwid >> 4) & 0x3);
#endif
ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("erspan_get_tunnel")
-int _erspan_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int erspan_get_tunnel(struct __sk_buff *skb)
{
- char fmt[] = "key %d remote ip 0x%x erspan version %d\n";
struct bpf_tunnel_key key;
struct erspan_metadata md;
- __u32 index;
int ret;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv4, md.version);
+ bpf_printk("key %d remote ip 0x%x erspan version %d\n",
+ key.tunnel_id, key.remote_ipv4, md.version);
#ifdef ERSPAN_V1
- char fmt2[] = "\tindex %x\n";
-
index = bpf_ntohl(md.u.index);
- bpf_trace_printk(fmt2, sizeof(fmt2), index);
+ bpf_printk("\tindex %x\n", index);
#else
- char fmt2[] = "\tdirection %d hwid %x timestamp %u\n";
-
- bpf_trace_printk(fmt2, sizeof(fmt2),
- md.u.md2.dir,
- (md.u.md2.hwid_upper << 4) + md.u.md2.hwid,
- bpf_ntohl(md.u.md2.timestamp));
+ bpf_printk("\tdirection %d hwid %x timestamp %u\n",
+ BPF_CORE_READ_BITFIELD(&md.u.md2, dir),
+ (BPF_CORE_READ_BITFIELD(&md.u.md2, hwid_upper) << 4) +
+ BPF_CORE_READ_BITFIELD(&md.u.md2, hwid),
+ bpf_ntohl(md.u.md2.timestamp));
#endif
return TC_ACT_OK;
}
-SEC("ip4ip6erspan_set_tunnel")
-int _ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
struct erspan_metadata md;
@@ -226,7 +251,7 @@ int _ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
@@ -240,70 +265,113 @@ int _ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
__u8 hwid = 17;
md.version = 2;
- md.u.md2.dir = direction;
- md.u.md2.hwid = hwid & 0xf;
- md.u.md2.hwid_upper = (hwid >> 4) & 0x3;
+ BPF_CORE_WRITE_BITFIELD(&md.u.md2, dir, direction);
+ BPF_CORE_WRITE_BITFIELD(&md.u.md2, hwid, (hwid & 0xf));
+ BPF_CORE_WRITE_BITFIELD(&md.u.md2, hwid_upper, (hwid >> 4) & 0x3);
#endif
ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("ip4ip6erspan_get_tunnel")
-int _ip4ip6erspan_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip4ip6erspan_get_tunnel(struct __sk_buff *skb)
{
- char fmt[] = "ip6erspan get key %d remote ip6 ::%x erspan version %d\n";
struct bpf_tunnel_key key;
struct erspan_metadata md;
- __u32 index;
int ret;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv4, md.version);
+ bpf_printk("ip6erspan get key %d remote ip6 ::%x erspan version %d\n",
+ key.tunnel_id, key.remote_ipv4, md.version);
#ifdef ERSPAN_V1
- char fmt2[] = "\tindex %x\n";
-
index = bpf_ntohl(md.u.index);
- bpf_trace_printk(fmt2, sizeof(fmt2), index);
+ bpf_printk("\tindex %x\n", index);
#else
- char fmt2[] = "\tdirection %d hwid %x timestamp %u\n";
-
- bpf_trace_printk(fmt2, sizeof(fmt2),
- md.u.md2.dir,
- (md.u.md2.hwid_upper << 4) + md.u.md2.hwid,
- bpf_ntohl(md.u.md2.timestamp));
+ bpf_printk("\tdirection %d hwid %x timestamp %u\n",
+ BPF_CORE_READ_BITFIELD(&md.u.md2, dir),
+ (BPF_CORE_READ_BITFIELD(&md.u.md2, hwid_upper) << 4) +
+ BPF_CORE_READ_BITFIELD(&md.u.md2, hwid),
+ bpf_ntohl(md.u.md2.timestamp));
#endif
return TC_ACT_OK;
}
-SEC("vxlan_set_tunnel")
-int _vxlan_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int vxlan_set_tunnel_dst(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key;
+ struct vxlan_metadata md;
+ __u32 index = 0;
+ __u32 *local_ip = NULL;
+ int ret = 0;
+
+ local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
+ if (!local_ip) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.local_ipv4 = 0xac100164; /* 172.16.1.100 */
+ key.remote_ipv4 = *local_ip;
+ key.tunnel_id = 2;
+ key.tunnel_tos = 0;
+ key.tunnel_ttl = 64;
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
+ BPF_F_ZERO_CSUM_TX);
+ if (ret < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ md.gbp = 0x800FF; /* Set VXLAN Group Policy extension */
+ ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
+ if (ret < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("tc")
+int vxlan_set_tunnel_src(struct __sk_buff *skb)
{
- int ret;
struct bpf_tunnel_key key;
struct vxlan_metadata md;
+ __u32 index = 0;
+ __u32 *local_ip = NULL;
+ int ret = 0;
+
+ local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
+ if (!local_ip) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
__builtin_memset(&key, 0x0, sizeof(key));
+ key.local_ipv4 = *local_ip;
key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
key.tunnel_id = 2;
key.tunnel_tos = 0;
@@ -312,53 +380,154 @@ int _vxlan_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
md.gbp = 0x800FF; /* Set VXLAN Group Policy extension */
ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("vxlan_get_tunnel")
-int _vxlan_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int vxlan_get_tunnel_src(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
struct vxlan_metadata md;
- char fmt[] = "key %d remote ip 0x%x vxlan gbp 0x%x\n";
- ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
+ BPF_F_TUNINFO_FLAGS);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ if (key.local_ipv4 != ASSIGNED_ADDR_VETH1 || md.gbp != 0x800FF ||
+ !(key.tunnel_flags & TUNNEL_KEY) ||
+ (key.tunnel_flags & TUNNEL_CSUM)) {
+ bpf_printk("vxlan key %d local ip 0x%x remote ip 0x%x gbp 0x%x flags 0x%x\n",
+ key.tunnel_id, key.local_ipv4,
+ key.remote_ipv4, md.gbp,
+ bpf_ntohs(key.tunnel_flags));
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
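+/* Rewrite the outer IPv4 destination of VXLAN traffic (UDP dport 4789) to
+ * veth1's assigned address, fixing up the IP checksum with
+ * bpf_l3_csum_replace() and marking the packet as PACKET_HOST.
+ */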
+SEC("tc")
+int veth_set_outer_dst(struct __sk_buff *skb)
+{
+ struct ethhdr *eth = (struct ethhdr *)(long)skb->data;
+ __u32 assigned_ip = bpf_htonl(ASSIGNED_ADDR_VETH1);
+ void *data_end = (void *)(long)skb->data_end;
+ struct udphdr *udph;
+ struct iphdr *iph;
+ int ret = 0;
+ __s64 csum;
+
+ if ((void *)eth + sizeof(*eth) > data_end) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ if (eth->h_proto != bpf_htons(ETH_P_IP))
+ return TC_ACT_OK;
+
+ iph = (struct iphdr *)(eth + 1);
+ if ((void *)iph + sizeof(*iph) > data_end) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+ if (iph->protocol != IPPROTO_UDP)
+ return TC_ACT_OK;
+
+ udph = (struct udphdr *)(iph + 1);
+ if ((void *)udph + sizeof(*udph) > data_end) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+ if (udph->dest != bpf_htons(VXLAN_UDP_PORT))
+ return TC_ACT_OK;
+
+ if (iph->daddr != assigned_ip) {
+ csum = bpf_csum_diff(&iph->daddr, sizeof(__u32), &assigned_ip,
+ sizeof(__u32), 0);
+ if (bpf_skb_store_bytes(skb, ETH_HLEN + offsetof(struct iphdr, daddr),
+ &assigned_ip, sizeof(__u32), 0) < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+ if (bpf_l3_csum_replace(skb, ETH_HLEN + offsetof(struct iphdr, check),
+ 0, csum, 0) < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+ bpf_skb_change_type(skb, PACKET_HOST);
+ }
+ return TC_ACT_OK;
+}
+
+SEC("tc")
+int ip6vxlan_set_tunnel_dst(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key;
+ __u32 index = 0;
+ __u32 *local_ip;
+ int ret = 0;
+
+ local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
+ if (!local_ip) {
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv4, md.gbp);
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.local_ipv6[3] = bpf_htonl(0x11); /* ::11 */
+ key.remote_ipv6[3] = bpf_htonl(*local_ip);
+ key.tunnel_id = 22;
+ key.tunnel_tos = 0;
+ key.tunnel_ttl = 64;
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
+ BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
return TC_ACT_OK;
}
-SEC("ip6vxlan_set_tunnel")
-int _ip6vxlan_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6vxlan_set_tunnel_src(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
- int ret;
+ __u32 index = 0;
+ __u32 *local_ip;
+ int ret = 0;
+
+ local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
+ if (!local_ip) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
__builtin_memset(&key, 0x0, sizeof(key));
+ key.local_ipv6[3] = bpf_htonl(*local_ip);
key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */
key.tunnel_id = 22;
key.tunnel_tos = 0;
@@ -367,39 +536,61 @@ int _ip6vxlan_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("ip6vxlan_get_tunnel")
-int _ip6vxlan_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6vxlan_get_tunnel_src(struct __sk_buff *skb)
{
- char fmt[] = "key %d remote ip6 ::%x label %x\n";
struct bpf_tunnel_key key;
- int ret;
+ __u32 index = 0;
+ __u32 *local_ip;
+ int ret = 0;
+
+ local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
+ if (!local_ip) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
- BPF_F_TUNINFO_IPV6);
+ BPF_F_TUNINFO_IPV6 | BPF_F_TUNINFO_FLAGS);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv6[3], key.tunnel_label);
+ if (bpf_ntohl(key.local_ipv6[3]) != *local_ip ||
+ !(key.tunnel_flags & TUNNEL_KEY) ||
+ !(key.tunnel_flags & TUNNEL_CSUM)) {
+ bpf_printk("ip6vxlan key %d local ip6 ::%x remote ip6 ::%x label 0x%x flags 0x%x\n",
+ key.tunnel_id, bpf_ntohl(key.local_ipv6[3]),
+ bpf_ntohl(key.remote_ipv6[3]), key.tunnel_label,
+ bpf_ntohs(key.tunnel_flags));
+ bpf_printk("local_ip 0x%x\n", *local_ip);
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
return TC_ACT_OK;
}
-SEC("geneve_set_tunnel")
-int _geneve_set_tunnel(struct __sk_buff *skb)
+struct local_geneve_opt {
+ struct geneve_opt gopt;
+ int data;
+};
+
+SEC("tc")
+int geneve_set_tunnel(struct __sk_buff *skb)
{
- int ret, ret2;
+ int ret;
struct bpf_tunnel_key key;
- struct geneve_opt gopt;
+ struct local_geneve_opt local_gopt;
+ struct geneve_opt *gopt = (struct geneve_opt *) &local_gopt;
__builtin_memset(&key, 0x0, sizeof(key));
key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
@@ -407,61 +598,59 @@ int _geneve_set_tunnel(struct __sk_buff *skb)
key.tunnel_tos = 0;
key.tunnel_ttl = 64;
- __builtin_memset(&gopt, 0x0, sizeof(gopt));
- gopt.opt_class = bpf_htons(0x102); /* Open Virtual Networking (OVN) */
- gopt.type = 0x08;
- gopt.r1 = 0;
- gopt.r2 = 0;
- gopt.r3 = 0;
- gopt.length = 2; /* 4-byte multiple */
- *(int *) &gopt.opt_data = bpf_htonl(0xdeadbeef);
+ __builtin_memset(gopt, 0x0, sizeof(local_gopt));
+ gopt->opt_class = bpf_htons(0x102); /* Open Virtual Networking (OVN) */
+ gopt->type = 0x08;
+ gopt->r1 = 0;
+ gopt->r2 = 0;
+ gopt->r3 = 0;
+ gopt->length = 2; /* 4-byte multiple */
+ *(int *) &gopt->opt_data = bpf_htonl(0xdeadbeef);
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- ret = bpf_skb_set_tunnel_opt(skb, &gopt, sizeof(gopt));
+ ret = bpf_skb_set_tunnel_opt(skb, gopt, sizeof(local_gopt));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("geneve_get_tunnel")
-int _geneve_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int geneve_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
struct geneve_opt gopt;
- char fmt[] = "key %d remote ip 0x%x geneve class 0x%x\n";
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
- if (ret < 0) {
- ERROR(ret);
- return TC_ACT_SHOT;
- }
+ if (ret < 0)
+ gopt.opt_class = 0;
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv4, gopt.opt_class);
+ bpf_printk("key %d remote ip 0x%x geneve class 0x%x\n",
+ key.tunnel_id, key.remote_ipv4, gopt.opt_class);
return TC_ACT_OK;
}
-SEC("ip6geneve_set_tunnel")
-int _ip6geneve_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6geneve_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
- struct geneve_opt gopt;
+ struct local_geneve_opt local_gopt;
+ struct geneve_opt *gopt = (struct geneve_opt *) &local_gopt;
int ret;
__builtin_memset(&key, 0x0, sizeof(key));
@@ -473,32 +662,31 @@ int _ip6geneve_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- __builtin_memset(&gopt, 0x0, sizeof(gopt));
- gopt.opt_class = bpf_htons(0x102); /* Open Virtual Networking (OVN) */
- gopt.type = 0x08;
- gopt.r1 = 0;
- gopt.r2 = 0;
- gopt.r3 = 0;
- gopt.length = 2; /* 4-byte multiple */
- *(int *) &gopt.opt_data = bpf_htonl(0xfeedbeef);
+ __builtin_memset(gopt, 0x0, sizeof(local_gopt));
+ gopt->opt_class = bpf_htons(0x102); /* Open Virtual Networking (OVN) */
+ gopt->type = 0x08;
+ gopt->r1 = 0;
+ gopt->r2 = 0;
+ gopt->r3 = 0;
+ gopt->length = 2; /* 4-byte multiple */
+ *(int *) &gopt->opt_data = bpf_htonl(0xfeedbeef);
- ret = bpf_skb_set_tunnel_opt(skb, &gopt, sizeof(gopt));
+ ret = bpf_skb_set_tunnel_opt(skb, gopt, sizeof(gopt));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("ip6geneve_get_tunnel")
-int _ip6geneve_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6geneve_get_tunnel(struct __sk_buff *skb)
{
- char fmt[] = "key %d remote ip 0x%x geneve class 0x%x\n";
struct bpf_tunnel_key key;
struct geneve_opt gopt;
int ret;
@@ -506,208 +694,333 @@ int _ip6geneve_get_tunnel(struct __sk_buff *skb)
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
+ if (ret < 0)
+ gopt.opt_class = 0;
+
+ bpf_printk("key %d remote ip 0x%x geneve class 0x%x\n",
+ key.tunnel_id, key.remote_ipv4, gopt.opt_class);
+
+ return TC_ACT_OK;
+}
+
+SEC("tc")
+int ipip_set_tunnel(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key = {};
+ void *data = (void *)(long)skb->data;
+ struct iphdr *iph = data;
+ void *data_end = (void *)(long)skb->data_end;
+ int ret;
+
+ /* single length check */
+ if (data + sizeof(*iph) > data_end) {
+ log_err(1);
+ return TC_ACT_SHOT;
+ }
+
+ key.tunnel_ttl = 64;
+ if (iph->protocol == IPPROTO_ICMP) {
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ }
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv4, gopt.opt_class);
+ return TC_ACT_OK;
+}
+SEC("tc")
+int ipip_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
+ if (ret < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_printk("remote ip 0x%x\n", key.remote_ipv4);
return TC_ACT_OK;
}
-SEC("ipip_set_tunnel")
-int _ipip_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ipip_gue_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key = {};
+ struct bpf_fou_encap___local encap = {};
void *data = (void *)(long)skb->data;
struct iphdr *iph = data;
- struct tcphdr *tcp = data + sizeof(*iph);
void *data_end = (void *)(long)skb->data_end;
int ret;
- /* single length check */
- if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
- ERROR(1);
+ if (data + sizeof(*iph) > data_end) {
+ log_err(1);
return TC_ACT_SHOT;
}
key.tunnel_ttl = 64;
- if (iph->protocol == IPPROTO_ICMP) {
+ if (iph->protocol == IPPROTO_ICMP)
key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
- } else {
- if (iph->protocol != IPPROTO_TCP || iph->ihl != 5)
- return TC_ACT_SHOT;
- if (tcp->dest == bpf_htons(5200))
- key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
- else if (tcp->dest == bpf_htons(5201))
- key.remote_ipv4 = 0xac100165; /* 172.16.1.101 */
- else
- return TC_ACT_SHOT;
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
+ if (ret < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ encap.sport = 0;
+ encap.dport = bpf_htons(5555);
+
+ ret = bpf_skb_set_fou_encap(skb, (struct bpf_fou_encap *)&encap,
+ bpf_core_enum_value(enum bpf_fou_encap_type___local,
+ FOU_BPF_ENCAP_GUE___local));
+ if (ret < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
}
+ return TC_ACT_OK;
+}
+
+SEC("tc")
+int ipip_fou_set_tunnel(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key = {};
+ struct bpf_fou_encap___local encap = {};
+ void *data = (void *)(long)skb->data;
+ struct iphdr *iph = data;
+ void *data_end = (void *)(long)skb->data_end;
+ int ret;
+
+ if (data + sizeof(*iph) > data_end) {
+ log_err(1);
+ return TC_ACT_SHOT;
+ }
+
+ key.tunnel_ttl = 64;
+ if (iph->protocol == IPPROTO_ICMP)
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ encap.sport = 0;
+ encap.dport = bpf_htons(5555);
+
+ ret = bpf_skb_set_fou_encap(skb, (struct bpf_fou_encap *)&encap,
+ FOU_BPF_ENCAP_FOU___local);
+ if (ret < 0) {
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("ipip_get_tunnel")
-int _ipip_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ipip_encap_get_tunnel(struct __sk_buff *skb)
{
int ret;
- struct bpf_tunnel_key key;
- char fmt[] = "remote ip 0x%x\n";
+ struct bpf_tunnel_key key = {};
+ struct bpf_fou_encap___local encap = {};
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt), key.remote_ipv4);
+ ret = bpf_skb_get_fou_encap(skb, (struct bpf_fou_encap *)&encap);
+ if (ret < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ if (bpf_ntohs(encap.dport) != 5555)
+ return TC_ACT_SHOT;
+
+ bpf_printk("%d remote ip 0x%x, sport %d, dport %d\n", ret,
+ key.remote_ipv4, bpf_ntohs(encap.sport),
+ bpf_ntohs(encap.dport));
return TC_ACT_OK;
}
-SEC("ipip6_set_tunnel")
-int _ipip6_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ipip6_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key = {};
void *data = (void *)(long)skb->data;
struct iphdr *iph = data;
- struct tcphdr *tcp = data + sizeof(*iph);
void *data_end = (void *)(long)skb->data_end;
int ret;
/* single length check */
- if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
- ERROR(1);
+ if (data + sizeof(*iph) > data_end) {
+ log_err(1);
return TC_ACT_SHOT;
}
__builtin_memset(&key, 0x0, sizeof(key));
- key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */
key.tunnel_ttl = 64;
+ if (iph->protocol == IPPROTO_ICMP) {
+ key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */
+ }
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("ipip6_get_tunnel")
-int _ipip6_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ipip6_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
- char fmt[] = "remote ip6 %x::%x\n";
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt), bpf_htonl(key.remote_ipv6[0]),
- bpf_htonl(key.remote_ipv6[3]));
+ bpf_printk("remote ip6 %x::%x\n", bpf_htonl(key.remote_ipv6[0]),
+ bpf_htonl(key.remote_ipv6[3]));
return TC_ACT_OK;
}
-SEC("ip6ip6_set_tunnel")
-int _ip6ip6_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6ip6_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key = {};
void *data = (void *)(long)skb->data;
struct ipv6hdr *iph = data;
- struct tcphdr *tcp = data + sizeof(*iph);
void *data_end = (void *)(long)skb->data_end;
int ret;
/* single length check */
- if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
- ERROR(1);
+ if (data + sizeof(*iph) > data_end) {
+ log_err(1);
return TC_ACT_SHOT;
}
- key.remote_ipv6[0] = bpf_htonl(0x2401db00);
key.tunnel_ttl = 64;
-
if (iph->nexthdr == 58 /* NEXTHDR_ICMP */) {
- key.remote_ipv6[3] = bpf_htonl(1);
- } else {
- if (iph->nexthdr != 6 /* NEXTHDR_TCP */) {
- ERROR(iph->nexthdr);
- return TC_ACT_SHOT;
- }
-
- if (tcp->dest == bpf_htons(5200)) {
- key.remote_ipv6[3] = bpf_htonl(1);
- } else if (tcp->dest == bpf_htons(5201)) {
- key.remote_ipv6[3] = bpf_htonl(2);
- } else {
- ERROR(tcp->dest);
- return TC_ACT_SHOT;
- }
+ key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */
}
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("ip6ip6_get_tunnel")
-int _ip6ip6_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6ip6_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
- char fmt[] = "remote ip6 %x::%x\n";
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt), bpf_htonl(key.remote_ipv6[0]),
- bpf_htonl(key.remote_ipv6[3]));
+ bpf_printk("remote ip6 %x::%x\n", bpf_htonl(key.remote_ipv6[0]),
+ bpf_htonl(key.remote_ipv6[3]));
return TC_ACT_OK;
}
-SEC("xfrm_get_state")
-int _xfrm_get_state(struct __sk_buff *skb)
+volatile int xfrm_reqid = 0;
+volatile int xfrm_spi = 0;
+volatile int xfrm_remote_ip = 0;
+
+SEC("tc")
+int xfrm_get_state(struct __sk_buff *skb)
{
struct bpf_xfrm_state x;
- char fmt[] = "reqid %d spi 0x%x remote ip 0x%x\n";
int ret;
ret = bpf_skb_get_xfrm_state(skb, 0, &x, sizeof(x), 0);
if (ret < 0)
return TC_ACT_OK;
- bpf_trace_printk(fmt, sizeof(fmt), x.reqid, bpf_ntohl(x.spi),
- bpf_ntohl(x.remote_ipv4));
+ xfrm_reqid = x.reqid;
+ xfrm_spi = bpf_ntohl(x.spi);
+ xfrm_remote_ip = bpf_ntohl(x.remote_ipv4);
+
return TC_ACT_OK;
}
+volatile int xfrm_replay_window = 0;
+
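+/* Parse the outer IPv4 and ESP headers via dynptr slices, look up the
+ * matching xfrm state with the bpf_xdp_get_xfrm_state() kfunc and record
+ * its anti-replay window size.
+ */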
+SEC("xdp")
+int xfrm_get_state_xdp(struct xdp_md *xdp)
+{
+ struct bpf_xfrm_state_opts opts = {};
+ struct xfrm_state *x = NULL;
+ struct ip_esp_hdr *esph;
+ struct bpf_dynptr ptr;
+ u8 esph_buf[8] = {};
+ u8 iph_buf[20] = {};
+ struct iphdr *iph;
+ u32 off;
+
+ if (bpf_dynptr_from_xdp(xdp, 0, &ptr))
+ goto out;
+
+ off = sizeof(struct ethhdr);
+ iph = bpf_dynptr_slice(&ptr, off, iph_buf, sizeof(iph_buf));
+ if (!iph || iph->protocol != IPPROTO_ESP)
+ goto out;
+
+ off += sizeof(struct iphdr);
+ esph = bpf_dynptr_slice(&ptr, off, esph_buf, sizeof(esph_buf));
+ if (!esph)
+ goto out;
+
+ opts.netns_id = BPF_F_CURRENT_NETNS;
+ opts.daddr.a4 = iph->daddr;
+ opts.spi = esph->spi;
+ opts.proto = IPPROTO_ESP;
+ opts.family = AF_INET;
+
+ x = bpf_xdp_get_xfrm_state(xdp, &opts, sizeof(opts));
+ if (!x)
+ goto out;
+
+ if (!x->replay_esn)
+ goto out;
+
+ xfrm_replay_window = x->replay_esn->replay_window;
+out:
+ if (x)
+ bpf_xdp_xfrm_state_release(x);
+ return XDP_PASS;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/progs/test_unpriv_bpf_disabled.c
new file mode 100644
index 000000000000..fc423e43a3cd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_unpriv_bpf_disabled.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Oracle and/or its affiliates. */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+__u32 perfbuf_val = 0;
+__u32 ringbuf_val = 0;
+
+int test_pid;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} percpu_array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} hash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} percpu_hash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+} perfbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 1 << 12);
+} ringbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} prog_array SEC(".maps");
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int sys_nanosleep_enter(void *ctx)
+{
+ int cur_pid;
+
+ cur_pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (cur_pid != test_pid)
+ return 0;
+
+ bpf_perf_event_output(ctx, &perfbuf, BPF_F_CURRENT_CPU, &perfbuf_val, sizeof(perfbuf_val));
+ bpf_ringbuf_output(&ringbuf, &ringbuf_val, sizeof(ringbuf_val), 0);
+
+ return 0;
+}
+
+SEC("perf_event")
+int handle_perf_event(void *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_uprobe.c b/tools/testing/selftests/bpf/progs/test_uprobe.c
new file mode 100644
index 000000000000..896c88a4960d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_uprobe.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Hengqi Chen */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+pid_t my_pid = 0;
+
+int test1_result = 0;
+int test2_result = 0;
+int test3_result = 0;
+int test4_result = 0;
+
+SEC("uprobe/./liburandom_read.so:urandlib_api_sameoffset")
+int BPF_UPROBE(test1)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != my_pid)
+ return 0;
+
+ test1_result = 1;
+ return 0;
+}
+
+SEC("uprobe/./liburandom_read.so:urandlib_api_sameoffset@LIBURANDOM_READ_1.0.0")
+int BPF_UPROBE(test2)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != my_pid)
+ return 0;
+
+ test2_result = 1;
+ return 0;
+}
+
+SEC("uretprobe/./liburandom_read.so:urandlib_api_sameoffset@@LIBURANDOM_READ_2.0.0")
+int BPF_URETPROBE(test3, int ret)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != my_pid)
+ return 0;
+
+ test3_result = ret;
+ return 0;
+}
+
+SEC("uprobe")
+int BPF_UPROBE(test4)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != my_pid)
+ return 0;
+
+ test4_result = 1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c b/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c
new file mode 100644
index 000000000000..da4bf89d004c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Oracle and/or its affiliates. */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+int uprobe_byname_parm1 = 0;
+int uprobe_byname_ran = 0;
+int uretprobe_byname_rc = 0;
+int uretprobe_byname_ret = 0;
+int uretprobe_byname_ran = 0;
+u64 uprobe_byname2_parm1 = 0;
+int uprobe_byname2_ran = 0;
+u64 uretprobe_byname2_rc = 0;
+int uretprobe_byname2_ran = 0;
+
+int test_pid;
+
+int a[8];
+
+/* This program cannot auto-attach, but that should not stop other
+ * programs from attaching.
+ */
+SEC("uprobe")
+int handle_uprobe_noautoattach(struct pt_regs *ctx)
+{
+ return 0;
+}
+
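+/* Arguments beyond what the architecture passes in registers are compiled
+ * out via the per-arch FUNC_REG_ARG_CNT limit.
+ */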
+SEC("uprobe//proc/self/exe:autoattach_trigger_func")
+int BPF_UPROBE(handle_uprobe_byname
+ , int arg1
+ , int arg2
+ , int arg3
+#if FUNC_REG_ARG_CNT > 3
+ , int arg4
+#endif
+#if FUNC_REG_ARG_CNT > 4
+ , int arg5
+#endif
+#if FUNC_REG_ARG_CNT > 5
+ , int arg6
+#endif
+#if FUNC_REG_ARG_CNT > 6
+ , int arg7
+#endif
+#if FUNC_REG_ARG_CNT > 7
+ , int arg8
+#endif
+)
+{
+ uprobe_byname_parm1 = PT_REGS_PARM1_CORE(ctx);
+ uprobe_byname_ran = 1;
+
+ a[0] = arg1;
+ a[1] = arg2;
+ a[2] = arg3;
+#if FUNC_REG_ARG_CNT > 3
+ a[3] = arg4;
+#endif
+#if FUNC_REG_ARG_CNT > 4
+ a[4] = arg5;
+#endif
+#if FUNC_REG_ARG_CNT > 5
+ a[5] = arg6;
+#endif
+#if FUNC_REG_ARG_CNT > 6
+ a[6] = arg7;
+#endif
+#if FUNC_REG_ARG_CNT > 7
+ a[7] = arg8;
+#endif
+ return 0;
+}
+
+SEC("uretprobe//proc/self/exe:autoattach_trigger_func")
+int BPF_URETPROBE(handle_uretprobe_byname, int ret)
+{
+ uretprobe_byname_rc = PT_REGS_RC_CORE(ctx);
+ uretprobe_byname_ret = ret;
+ uretprobe_byname_ran = 2;
+
+ return 0;
+}
+
+
+SEC("uprobe/libc.so.6:fopen")
+int BPF_UPROBE(handle_uprobe_byname2, const char *pathname, const char *mode)
+{
+ int pid = bpf_get_current_pid_tgid() >> 32;
+
+ /* ignore irrelevant invocations */
+ if (test_pid != pid)
+ return 0;
+ uprobe_byname2_parm1 = (u64)(long)pathname;
+ uprobe_byname2_ran = 3;
+ return 0;
+}
+
+SEC("uretprobe/libc.so.6:fopen")
+int BPF_URETPROBE(handle_uretprobe_byname2, void *ret)
+{
+ int pid = bpf_get_current_pid_tgid() >> 32;
+
+ /* ignore irrelevant invocations */
+ if (test_pid != pid)
+ return 0;
+ uretprobe_byname2_rc = (u64)(long)ret;
+ uretprobe_byname2_ran = 4;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_urandom_usdt.c b/tools/testing/selftests/bpf/progs/test_urandom_usdt.c
new file mode 100644
index 000000000000..3539b02bd5f7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_urandom_usdt.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/usdt.bpf.h>
+
+int urand_pid;
+
+int urand_read_without_sema_call_cnt;
+int urand_read_without_sema_buf_sz_sum;
+
+SEC("usdt/./urandom_read:urand:read_without_sema")
+int BPF_USDT(urand_read_without_sema, int iter_num, int iter_cnt, int buf_sz)
+{
+ if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&urand_read_without_sema_call_cnt, 1);
+ __sync_fetch_and_add(&urand_read_without_sema_buf_sz_sum, buf_sz);
+
+ return 0;
+}
+
+int urand_read_with_sema_call_cnt;
+int urand_read_with_sema_buf_sz_sum;
+
+SEC("usdt/./urandom_read:urand:read_with_sema")
+int BPF_USDT(urand_read_with_sema, int iter_num, int iter_cnt, int buf_sz)
+{
+ if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&urand_read_with_sema_call_cnt, 1);
+ __sync_fetch_and_add(&urand_read_with_sema_buf_sz_sum, buf_sz);
+
+ return 0;
+}
+
+int urandlib_read_without_sema_call_cnt;
+int urandlib_read_without_sema_buf_sz_sum;
+
+SEC("usdt/./liburandom_read.so:urandlib:read_without_sema")
+int BPF_USDT(urandlib_read_without_sema, int iter_num, int iter_cnt, int buf_sz)
+{
+ if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&urandlib_read_without_sema_call_cnt, 1);
+ __sync_fetch_and_add(&urandlib_read_without_sema_buf_sz_sum, buf_sz);
+
+ return 0;
+}
+
+int urandlib_read_with_sema_call_cnt;
+int urandlib_read_with_sema_buf_sz_sum;
+
+SEC("usdt/./liburandom_read.so:urandlib:read_with_sema")
+int BPF_USDT(urandlib_read_with_sema, int iter_num, int iter_cnt, int buf_sz)
+{
+ if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&urandlib_read_with_sema_call_cnt, 1);
+ __sync_fetch_and_add(&urandlib_read_with_sema_buf_sz_sum, buf_sz);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_usdt.c b/tools/testing/selftests/bpf/progs/test_usdt.c
new file mode 100644
index 000000000000..096488f47fbc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_usdt.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/usdt.bpf.h>
+
+int my_pid;
+
+int usdt0_called;
+u64 usdt0_cookie;
+int usdt0_arg_cnt;
+int usdt0_arg_ret;
+int usdt0_arg_size;
+
+SEC("usdt")
+int usdt0(struct pt_regs *ctx)
+{
+ long tmp;
+
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&usdt0_called, 1);
+
+ usdt0_cookie = bpf_usdt_cookie(ctx);
+ usdt0_arg_cnt = bpf_usdt_arg_cnt(ctx);
+ /* should return -ENOENT for any arg_num */
+ usdt0_arg_ret = bpf_usdt_arg(ctx, bpf_get_prandom_u32(), &tmp);
+ usdt0_arg_size = bpf_usdt_arg_size(ctx, bpf_get_prandom_u32());
+ return 0;
+}
+
+int usdt3_called;
+u64 usdt3_cookie;
+int usdt3_arg_cnt;
+int usdt3_arg_rets[3];
+u64 usdt3_args[3];
+int usdt3_arg_sizes[3];
+
+SEC("usdt//proc/self/exe:test:usdt3")
+int usdt3(struct pt_regs *ctx)
+{
+ long tmp;
+
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&usdt3_called, 1);
+
+ usdt3_cookie = bpf_usdt_cookie(ctx);
+ usdt3_arg_cnt = bpf_usdt_arg_cnt(ctx);
+
+ usdt3_arg_rets[0] = bpf_usdt_arg(ctx, 0, &tmp);
+ usdt3_args[0] = (int)tmp;
+ usdt3_arg_sizes[0] = bpf_usdt_arg_size(ctx, 0);
+
+ usdt3_arg_rets[1] = bpf_usdt_arg(ctx, 1, &tmp);
+ usdt3_args[1] = (long)tmp;
+ usdt3_arg_sizes[1] = bpf_usdt_arg_size(ctx, 1);
+
+ usdt3_arg_rets[2] = bpf_usdt_arg(ctx, 2, &tmp);
+ usdt3_args[2] = (uintptr_t)tmp;
+ usdt3_arg_sizes[2] = bpf_usdt_arg_size(ctx, 2);
+
+ return 0;
+}
+
+int usdt12_called;
+u64 usdt12_cookie;
+int usdt12_arg_cnt;
+u64 usdt12_args[12];
+int usdt12_arg_sizes[12];
+
+SEC("usdt//proc/self/exe:test:usdt12")
+int BPF_USDT(usdt12, int a1, int a2, long a3, long a4, unsigned a5,
+ long a6, __u64 a7, uintptr_t a8, int a9, short a10,
+ short a11, signed char a12)
+{
+ int i;
+
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&usdt12_called, 1);
+
+ usdt12_cookie = bpf_usdt_cookie(ctx);
+ usdt12_arg_cnt = bpf_usdt_arg_cnt(ctx);
+
+ usdt12_args[0] = a1;
+ usdt12_args[1] = a2;
+ usdt12_args[2] = a3;
+ usdt12_args[3] = a4;
+ usdt12_args[4] = a5;
+ usdt12_args[5] = a6;
+ usdt12_args[6] = a7;
+ usdt12_args[7] = a8;
+ usdt12_args[8] = a9;
+ usdt12_args[9] = a10;
+ usdt12_args[10] = a11;
+ usdt12_args[11] = a12;
+
+ bpf_for(i, 0, 12) {
+ usdt12_arg_sizes[i] = bpf_usdt_arg_size(ctx, i);
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_usdt_multispec.c b/tools/testing/selftests/bpf/progs/test_usdt_multispec.c
new file mode 100644
index 000000000000..962f3462066a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_usdt_multispec.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/usdt.bpf.h>
+
+/* This file is linked together with test_usdt.c to validate that usdt.bpf.h
+ * can be included in multiple .bpf.c files that form a single final BPF
+ * object file.
+ */
+
+extern int my_pid;
+
+int usdt_100_called;
+int usdt_100_sum;
+
+SEC("usdt//proc/self/exe:test:usdt_100")
+int BPF_USDT(usdt_100, int x)
+{
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&usdt_100_called, 1);
+ __sync_fetch_and_add(&usdt_100_sum, x);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_user_ringbuf.h b/tools/testing/selftests/bpf/progs/test_user_ringbuf.h
new file mode 100644
index 000000000000..1643b4d59ba7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_user_ringbuf.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _TEST_USER_RINGBUF_H
+#define _TEST_USER_RINGBUF_H
+
+#define TEST_OP_64 4
+#define TEST_OP_32 2
+
+enum test_msg_op {
+ TEST_MSG_OP_INC64,
+ TEST_MSG_OP_INC32,
+ TEST_MSG_OP_MUL64,
+ TEST_MSG_OP_MUL32,
+
+ // Must come last.
+ TEST_MSG_OP_NUM_OPS,
+};
+
+struct test_msg {
+ enum test_msg_op msg_op;
+ union {
+ __s64 operand_64;
+ __s32 operand_32;
+ };
+};
+
+struct sample {
+ int pid;
+ int seq;
+ long value;
+ char comm[16];
+};
+
+#endif /* _TEST_USER_RINGBUF_H */
diff --git a/tools/testing/selftests/bpf/progs/test_varlen.c b/tools/testing/selftests/bpf/progs/test_varlen.c
index 913acdffd90f..20eb7d422c41 100644
--- a/tools/testing/selftests/bpf/progs/test_varlen.c
+++ b/tools/testing/selftests/bpf/progs/test_varlen.c
@@ -19,6 +19,7 @@ __u64 payload1_len1 = 0;
__u64 payload1_len2 = 0;
__u64 total1 = 0;
char payload1[MAX_LEN + MAX_LEN] = {};
+__u64 ret_bad_read = 0;
/* .data */
int payload2_len1 = -1;
@@ -36,31 +37,35 @@ int payload4_len2 = -1;
int total4= -1;
char payload4[MAX_LEN + MAX_LEN] = { 1 };
+char payload_bad[5] = { 0x42, 0x42, 0x42, 0x42, 0x42 };
+
SEC("raw_tp/sys_enter")
int handler64_unsigned(void *regs)
{
int pid = bpf_get_current_pid_tgid() >> 32;
void *payload = payload1;
- u64 len;
+ long len;
/* ignore irrelevant invocations */
if (test_pid != pid || !capture)
return 0;
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);
- if (len <= MAX_LEN) {
+ if (len >= 0) {
payload += len;
payload1_len1 = len;
}
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]);
- if (len <= MAX_LEN) {
+ if (len >= 0) {
payload += len;
payload1_len2 = len;
}
total1 = payload - (void *)payload1;
+ ret_bad_read = bpf_probe_read_kernel_str(payload_bad + 2, 1, (void *) -1);
+
return 0;
}
@@ -123,7 +128,7 @@ int handler32_signed(void *regs)
{
int pid = bpf_get_current_pid_tgid() >> 32;
void *payload = payload4;
- int len;
+ long len;
/* ignore irrelevant invocations */
if (test_pid != pid || !capture)
diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale1.c b/tools/testing/selftests/bpf/progs/test_verif_scale1.c
index d38153dab3dd..323a73fb2e8c 100644
--- a/tools/testing/selftests/bpf/progs/test_verif_scale1.c
+++ b/tools/testing/selftests/bpf/progs/test_verif_scale1.c
@@ -5,13 +5,13 @@
#define ATTR __attribute__((noinline))
#include "test_jhash.h"
-SEC("scale90_noinline")
+SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
void *ptr;
- int ret = 0, nh_off, i = 0;
+ int nh_off, i = 0;
nh_off = 14;
diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale2.c b/tools/testing/selftests/bpf/progs/test_verif_scale2.c
index f024154c7be7..f5318f757084 100644
--- a/tools/testing/selftests/bpf/progs/test_verif_scale2.c
+++ b/tools/testing/selftests/bpf/progs/test_verif_scale2.c
@@ -1,17 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#define ATTR __always_inline
#include "test_jhash.h"
-SEC("scale90_inline")
+SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
void *ptr;
- int ret = 0, nh_off, i = 0;
+ int nh_off, i = 0;
nh_off = 14;
diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale3.c b/tools/testing/selftests/bpf/progs/test_verif_scale3.c
index 9beb5bf80373..2e06dbb1ad5c 100644
--- a/tools/testing/selftests/bpf/progs/test_verif_scale3.c
+++ b/tools/testing/selftests/bpf/progs/test_verif_scale3.c
@@ -5,13 +5,13 @@
#define ATTR __attribute__((noinline))
#include "test_jhash.h"
-SEC("scale90_noinline32")
+SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
void *ptr;
- int ret = 0, nh_off, i = 0;
+ int nh_off, i = 0;
nh_off = 32;
diff --git a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c
new file mode 100644
index 000000000000..ff8d755548b9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_kfuncs.h"
+#include "err.h"
+
+#define MAX_DATA_SIZE (1024 * 1024)
+#define MAX_SIG_SIZE 1024
+
+__u32 monitored_pid;
+__s32 user_keyring_serial;
+__u64 system_keyring_id;
+
+struct data {
+ __u8 data[MAX_DATA_SIZE];
+ __u32 data_len;
+ __u8 sig[MAX_SIG_SIZE];
+ __u32 sig_len;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct data);
+} data_input SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
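+/* For bpf(2) calls from the monitored pid, copy the user-supplied data and
+ * signature into dynptrs and verify the PKCS#7 signature against either the
+ * user keyring or the system keyring.
+ */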
+SEC("lsm.s/bpf")
+int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ struct bpf_dynptr data_ptr, sig_ptr;
+ struct data *data_val;
+ struct bpf_key *trusted_keyring;
+ __u32 pid;
+ __u64 value;
+ int ret, zero = 0;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ data_val = bpf_map_lookup_elem(&data_input, &zero);
+ if (!data_val)
+ return 0;
+
+ ret = bpf_probe_read_kernel(&value, sizeof(value), &attr->value);
+ if (ret)
+ goto out;
+
+ ret = bpf_copy_from_user(data_val, sizeof(struct data),
+ (void *)(unsigned long)value);
+ if (ret)
+ goto out;
+
+ if (data_val->data_len > sizeof(data_val->data))
+ return -EINVAL;
+
+ bpf_dynptr_from_mem(data_val->data, data_val->data_len, 0, &data_ptr);
+
+ if (data_val->sig_len > sizeof(data_val->sig))
+ return -EINVAL;
+
+ bpf_dynptr_from_mem(data_val->sig, data_val->sig_len, 0, &sig_ptr);
+
+ if (user_keyring_serial)
+ trusted_keyring = bpf_lookup_user_key(user_keyring_serial, 0);
+ else
+ trusted_keyring = bpf_lookup_system_key(system_keyring_id);
+
+ if (!trusted_keyring)
+ return -ENOENT;
+
+ ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, trusted_keyring);
+
+ bpf_key_put(trusted_keyring);
+
+out:
+ set_if_not_errno_or_zero(ret, -EFAULT);
+
+ return ret;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_vmlinux.c b/tools/testing/selftests/bpf/progs/test_vmlinux.c
index e9dfa0313d1b..78b23934d9f8 100644
--- a/tools/testing/selftests/bpf/progs/test_vmlinux.c
+++ b/tools/testing/selftests/bpf/progs/test_vmlinux.c
@@ -16,12 +16,12 @@ bool kprobe_called = false;
bool fentry_called = false;
SEC("tp/syscalls/sys_enter_nanosleep")
-int handle__tp(struct trace_event_raw_sys_enter *args)
+int handle__tp(struct syscall_trace_enter *args)
{
struct __kernel_timespec *ts;
long tv_nsec;
- if (args->id != __NR_nanosleep)
+ if (args->nr != __NR_nanosleep)
return 0;
ts = (void *)args->args[0];
@@ -42,7 +42,7 @@ int BPF_PROG(handle__raw_tp, struct pt_regs *regs, long id)
if (id != __NR_nanosleep)
return 0;
- ts = (void *)PT_REGS_PARM1_CORE(regs);
+ ts = (void *)PT_REGS_PARM1_CORE_SYSCALL(regs);
if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
tv_nsec != MY_TV_NSEC)
return 0;
@@ -60,7 +60,7 @@ int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
if (id != __NR_nanosleep)
return 0;
- ts = (void *)PT_REGS_PARM1_CORE(regs);
+ ts = (void *)PT_REGS_PARM1_CORE_SYSCALL(regs);
if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
tv_nsec != MY_TV_NSEC)
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c
index 31f9bce37491..8caf58be5818 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp.c
@@ -19,8 +19,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "test_iptunnel_common.h"
-
-int _version SEC("version") = 1;
+#include "bpf_compiler.h"
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
@@ -139,7 +138,7 @@ static __always_inline int handle_ipv4(struct xdp_md *xdp)
iph->ttl = 8;
next_iph = (__u16 *)iph;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0; i < sizeof(*iph) >> 1; i++)
csum += *next_iph++;
@@ -210,7 +209,7 @@ static __always_inline int handle_ipv6(struct xdp_md *xdp)
return XDP_TX;
}
-SEC("xdp_tx_iptunnel")
+SEC("xdp")
int _xdp_tx_iptunnel(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
index 3d66599eee2e..5904f45cfbc4 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
@@ -2,25 +2,41 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-SEC("xdp_adjust_tail_grow")
+SEC("xdp")
int _xdp_adjust_tail_grow(struct xdp_md *xdp)
{
- void *data_end = (void *)(long)xdp->data_end;
- void *data = (void *)(long)xdp->data;
- unsigned int data_len;
+ int data_len = bpf_xdp_get_buff_len(xdp);
int offset = 0;
+ /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
+#if defined(__TARGET_ARCH_s390)
+ int tailroom = 512;
+#elif defined(__TARGET_ARCH_powerpc)
+ int tailroom = 384;
+#else
+ int tailroom = 320;
+#endif
/* Data length determine test case */
- data_len = data_end - data;
if (data_len == 54) { /* sizeof(pkt_v4) */
- offset = 4096; /* test too large offset */
+ offset = 4096; /* test too large offset, 4k page size */
+ } else if (data_len == 53) { /* sizeof(pkt_v4) - 1 */
+ offset = 65536; /* test too large offset, 64k page size */
} else if (data_len == 74) { /* sizeof(pkt_v6) */
offset = 40;
} else if (data_len == 64) {
offset = 128;
} else if (data_len == 128) {
- offset = 4096 - 256 - 320 - data_len; /* Max tail grow 3520 */
+ /* Max tail grow 3520 */
+ offset = 4096 - 256 - tailroom - data_len;
+ } else if (data_len == 9000) {
+ offset = 10;
+ } else if (data_len == 9001) {
+ offset = 4096;
+ } else if (data_len == 90000) {
+ offset = 10; /* test a small offset, 64k page size */
+ } else if (data_len == 90001) {
+ offset = 65536; /* test too large offset, 64k page size */
} else {
return XDP_ABORTED; /* No matching test */
}
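The 128-byte case encodes the maximum-grow arithmetic directly: a 4096-byte page minus 256 bytes of XDP headroom minus the per-arch skb_shared_info tailroom selected above leaves 3520 bytes of frame budget ("Max tail grow 3520"), so the largest legal grow for a 128-byte packet is 3392. A small sketch of the same calculation, with the constants treated as assumptions of the test environment:

/* Sketch only: reproduces the grow budget the test hard-codes.
 * 4096 - 256 - 320 - 128 = 3392 on a 4k-page build with 320 bytes of
 * tailroom; this is the offset passed to bpf_xdp_adjust_tail().
 */
static int max_tail_grow(int page_size, int headroom, int tailroom, int data_len)
{
	return page_size - headroom - tailroom - data_len;
}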
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c
index 22065a9cfb25..ca68c038357c 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c
@@ -9,19 +9,41 @@
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
-int _version SEC("version") = 1;
-
-SEC("xdp_adjust_tail_shrink")
+SEC("xdp")
int _xdp_adjust_tail_shrink(struct xdp_md *xdp)
{
- void *data_end = (void *)(long)xdp->data_end;
- void *data = (void *)(long)xdp->data;
+ __u8 *data_end = (void *)(long)xdp->data_end;
+ __u8 *data = (void *)(long)xdp->data;
int offset = 0;
- if (data_end - data == 54) /* sizeof(pkt_v4) */
+ switch (bpf_xdp_get_buff_len(xdp)) {
+ case 54:
+ /* sizeof(pkt_v4) */
offset = 256; /* shrink too much */
- else
+ break;
+ case 9000:
+ /* non-linear buff test cases */
+ if (data + 1 > data_end)
+ return XDP_DROP;
+
+ switch (data[0]) {
+ case 0:
+ offset = 10;
+ break;
+ case 1:
+ offset = 4100;
+ break;
+ case 2:
+ offset = 8200;
+ break;
+ default:
+ return XDP_DROP;
+ }
+ break;
+ default:
offset = 20;
+ break;
+ }
if (bpf_xdp_adjust_tail(xdp, 0 - offset))
return XDP_DROP;
return XDP_TX;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_attach_fail.c b/tools/testing/selftests/bpf/progs/test_xdp_attach_fail.c
new file mode 100644
index 000000000000..2ff1b596e87e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_xdp_attach_fail.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Leon Hwang */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+#define ERRMSG_LEN 64
+
+struct xdp_errmsg {
+ char msg[ERRMSG_LEN];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __type(key, int);
+ __type(value, int);
+} xdp_errmsg_pb SEC(".maps");
+
+struct xdp_attach_error_ctx {
+ unsigned long unused;
+
+ /*
+ * bpf does not support tracepoint __data_loc directly.
+ *
+ * Actually, this field is a 32-bit integer whose value encodes
+ * information on where to find the actual data. The first 2 bytes are
+ * the size of the data. The last 2 bytes are the offset from the start
+ * of the tracepoint struct where the data begins.
+ * -- https://github.com/iovisor/bpftrace/pull/1542
+ */
+ __u32 msg; // __data_loc char[] msg;
+};
+
+/*
+ * Catch the error message at the tracepoint.
+ */
+
+SEC("tp/xdp/bpf_xdp_link_attach_failed")
+int tp__xdp__bpf_xdp_link_attach_failed(struct xdp_attach_error_ctx *ctx)
+{
+ char *msg = (void *)(__u64) ((void *) ctx + (__u16) ctx->msg);
+ struct xdp_errmsg errmsg = {};
+
+ bpf_probe_read_kernel_str(&errmsg.msg, ERRMSG_LEN, msg);
+ bpf_perf_event_output(ctx, &xdp_errmsg_pb, BPF_F_CURRENT_CPU, &errmsg,
+ ERRMSG_LEN);
+ return 0;
+}
+
+/*
+ * Reuse the XDP program in xdp_dummy.c.
+ */
+
+char LICENSE[] SEC("license") = "GPL";
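On the userspace side, the strings emitted into xdp_errmsg_pb would typically be drained with libbpf's perf buffer API. A hedged sketch, not part of the patch; the callback, page count, and poll timeout are illustrative:

#include <stdio.h>
#include <bpf/libbpf.h>

static void on_errmsg(void *ctx, int cpu, void *data, __u32 size)
{
	/* data is the 64-byte struct xdp_errmsg, NUL-terminated by
	 * bpf_probe_read_kernel_str() on the BPF side.
	 */
	printf("xdp attach failed: %s\n", (char *)data);
}

static int poll_errmsg(int perf_map_fd)
{
	struct perf_buffer *pb;
	int err;

	pb = perf_buffer__new(perf_map_fd, 8 /* pages per CPU */,
			      on_errmsg, NULL, NULL, NULL);
	if (!pb)
		return -1;

	err = perf_buffer__poll(pb, 100 /* ms */);
	perf_buffer__free(pb);
	return err;
}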
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
index a038e827f850..ee48c4963971 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
@@ -36,8 +36,8 @@ struct meta {
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} perf_buf_map SEC(".maps");
__u64 test_result_fentry = 0;
@@ -45,11 +45,9 @@ SEC("fentry/FUNC")
int BPF_PROG(trace_on_entry, struct xdp_buff *xdp)
{
struct meta meta;
- void *data_end = (void *)(long)xdp->data_end;
- void *data = (void *)(long)xdp->data;
meta.ifindex = xdp->rxq->dev->ifindex;
- meta.pkt_len = data_end - data;
+ meta.pkt_len = bpf_xdp_get_buff_len((struct xdp_md *)xdp);
bpf_xdp_output(xdp, &perf_buf_map,
((__u64) meta.pkt_len << 32) |
BPF_F_CURRENT_CPU,
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_context_test_run.c b/tools/testing/selftests/bpf/progs/test_xdp_context_test_run.c
new file mode 100644
index 000000000000..d7b88cd05afd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_xdp_context_test_run.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("xdp")
+int xdp_context(struct xdp_md *xdp)
+{
+ void *data = (void *)(long)xdp->data;
+ __u32 *metadata = (void *)(long)xdp->data_meta;
+ __u32 ret;
+
+ if (metadata + 1 > data)
+ return XDP_ABORTED;
+ ret = *metadata;
+ if (bpf_xdp_adjust_meta(xdp, 4))
+ return XDP_ABORTED;
+ return ret;
+}
+
+char _license[] SEC("license") = "GPL";
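The program above treats the first four bytes of the metadata area as its return code, so a test-run harness has to hand in a struct xdp_md whose data offset reserves that metadata in front of the packet. A hedged sketch of such a run, assuming the usual BPF_PROG_RUN conventions for XDP context; buffer size and the 0x42 value are arbitrary:

#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int run_xdp_context(int prog_fd)
{
	__u8 buf[128] = {};
	struct xdp_md ctx = {};
	__u32 magic = 0x42;

	memcpy(buf, &magic, sizeof(magic));	/* metadata area */
	ctx.data = sizeof(magic);		/* packet data starts after it */
	ctx.data_end = sizeof(buf);

	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = buf,
		.data_size_in = sizeof(buf),
		.ctx_in = &ctx,
		.ctx_size_in = sizeof(ctx),
	);

	if (bpf_prog_test_run_opts(prog_fd, &opts))
		return -1;

	return opts.retval == magic ? 0 : -1;
}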
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c
index b360ba2bd441..807bf895f42c 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c
@@ -5,7 +5,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-SEC("xdp_dm_log")
+SEC("xdp")
int xdpdm_devlog(struct xdp_md *ctx)
{
char fmt[] = "devmap redirect: dev %u -> dev %u len %u\n";
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c
new file mode 100644
index 000000000000..5928ed0911ca
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+#define ETH_ALEN 6
+#define HDR_SZ (sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + sizeof(struct udphdr))
+
+/**
+ * enum frame_mark - magics to distinguish page/packet paths
+ * @MARK_XMIT: page was recycled due to the frame being "xmitted" by the NIC.
+ * @MARK_IN: frame is being processed by the input XDP prog.
+ * @MARK_SKB: frame did hit the TC ingress hook as an skb.
+ */
+enum frame_mark {
+ MARK_XMIT = 0U,
+ MARK_IN = 0x42,
+ MARK_SKB = 0x45,
+};
+
+const volatile int ifindex_out;
+const volatile int ifindex_in;
+const volatile __u8 expect_dst[ETH_ALEN];
+volatile int pkts_seen_xdp = 0;
+volatile int pkts_seen_zero = 0;
+volatile int pkts_seen_tc = 0;
+volatile int retcode = XDP_REDIRECT;
+
+SEC("xdp")
+int xdp_redirect(struct xdp_md *xdp)
+{
+ __u32 *metadata = (void *)(long)xdp->data_meta;
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+
+ __u8 *payload = data + HDR_SZ;
+ int ret = retcode;
+
+ if (payload + 1 > data_end)
+ return XDP_ABORTED;
+
+ if (xdp->ingress_ifindex != (__u32)ifindex_in)
+ return XDP_ABORTED;
+
+ if (metadata + 1 > data)
+ return XDP_ABORTED;
+
+ if (*metadata != 0x42)
+ return XDP_ABORTED;
+
+ if (*payload == MARK_XMIT)
+ pkts_seen_zero++;
+
+ *payload = MARK_IN;
+
+ if (bpf_xdp_adjust_meta(xdp, sizeof(__u64)))
+ return XDP_ABORTED;
+
+ if (retcode > XDP_PASS)
+ retcode--;
+
+ if (ret == XDP_REDIRECT)
+ return bpf_redirect(ifindex_out, 0);
+
+ return ret;
+}
+
+static bool check_pkt(void *data, void *data_end, const __u32 mark)
+{
+ struct ipv6hdr *iph = data + sizeof(struct ethhdr);
+ __u8 *payload = data + HDR_SZ;
+
+ if (payload + 1 > data_end)
+ return false;
+
+ if (iph->nexthdr != IPPROTO_UDP || *payload != MARK_IN)
+ return false;
+
+ /* reset the payload so the same packet doesn't get counted twice when
+ * it cycles back through the kernel path and out the dst veth
+ */
+ *payload = mark;
+ return true;
+}
+
+SEC("xdp")
+int xdp_count_pkts(struct xdp_md *xdp)
+{
+ void *data = (void *)(long)xdp->data;
+ void *data_end = (void *)(long)xdp->data_end;
+
+ if (check_pkt(data, data_end, MARK_XMIT))
+ pkts_seen_xdp++;
+
+ /* Return %XDP_DROP to recycle the data page with %MARK_XMIT, like
+ * it exited a physical NIC. Those pages will be counted in the
+ * pkts_seen_zero counter above.
+ */
+ return XDP_DROP;
+}
+
+SEC("xdp")
+int xdp_redirect_to_111(struct xdp_md *xdp)
+{
+ return bpf_redirect(111, 0);
+}
+
+SEC("xdp")
+int xdp_redirect_to_222(struct xdp_md *xdp)
+{
+ return bpf_redirect(222, 0);
+}
+
+SEC("tc")
+int tc_count_pkts(struct __sk_buff *skb)
+{
+ void *data = (void *)(long)skb->data;
+ void *data_end = (void *)(long)skb->data_end;
+
+ if (check_pkt(data, data_end, MARK_SKB))
+ pkts_seen_tc++;
+
+ /* Will be either recycled or freed, %MARK_SKB makes sure it won't
+ * hit any of the counters above.
+ */
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
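Because retcode starts at XDP_REDIRECT and is decremented on every invocation down to XDP_PASS, repeatedly running xdp_redirect exercises the redirect, TX, and pass paths in turn; the counters then tell userspace how many frames took each route. A hedged sketch of how such a run could be driven with live frame injection (the packet layout, repeat count, and batch size are assumptions, not the selftest's exact values):

#include <linux/bpf.h>
#include <bpf/bpf.h>

static int run_live_frames(int prog_fd, int ifindex_in, void *pkt, __u32 pkt_len)
{
	/* the caller prepares pkt so that its first 8 bytes (the metadata
	 * area) hold the 0x42 marker the program checks
	 */
	struct xdp_md ctx = {
		.data = sizeof(__u64),		/* metadata occupies the first 8 bytes */
		.data_end = pkt_len,
		.ingress_ifindex = ifindex_in,
	};
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.ctx_in = &ctx,
		.ctx_size_in = sizeof(ctx),
		.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
		.repeat = 10,
		.batch_size = 8,
	);

	return bpf_prog_test_run_opts(prog_fd, &opts);
}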
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c
new file mode 100644
index 000000000000..67a77944ef29
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta */
+#include <stddef.h>
+#include <string.h>
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/pkt_cls.h>
+#include <sys/socket.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include "test_iptunnel_common.h"
+#include "bpf_kfuncs.h"
+
+#define tcphdr_sz sizeof(struct tcphdr)
+#define udphdr_sz sizeof(struct udphdr)
+#define ethhdr_sz sizeof(struct ethhdr)
+#define iphdr_sz sizeof(struct iphdr)
+#define ipv6hdr_sz sizeof(struct ipv6hdr)
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 256);
+ __type(key, __u32);
+ __type(value, __u64);
+} rxcnt SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, MAX_IPTNL_ENTRIES);
+ __type(key, struct vip);
+ __type(value, struct iptnl_info);
+} vip2tnl SEC(".maps");
+
+static __always_inline void count_tx(__u32 protocol)
+{
+ __u64 *rxcnt_count;
+
+ rxcnt_count = bpf_map_lookup_elem(&rxcnt, &protocol);
+ if (rxcnt_count)
+ *rxcnt_count += 1;
+}
+
+static __always_inline int get_dport(void *trans_data, __u8 protocol)
+{
+ struct tcphdr *th;
+ struct udphdr *uh;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ th = (struct tcphdr *)trans_data;
+ return th->dest;
+ case IPPROTO_UDP:
+ uh = (struct udphdr *)trans_data;
+ return uh->dest;
+ default:
+ return 0;
+ }
+}
+
+static __always_inline void set_ethhdr(struct ethhdr *new_eth,
+ const struct ethhdr *old_eth,
+ const struct iptnl_info *tnl,
+ __be16 h_proto)
+{
+ memcpy(new_eth->h_source, old_eth->h_dest, sizeof(new_eth->h_source));
+ memcpy(new_eth->h_dest, tnl->dmac, sizeof(new_eth->h_dest));
+ new_eth->h_proto = h_proto;
+}
+
+static __always_inline int handle_ipv4(struct xdp_md *xdp, struct bpf_dynptr *xdp_ptr)
+{
+ __u8 eth_buffer[ethhdr_sz + iphdr_sz + ethhdr_sz];
+ __u8 iph_buffer_tcp[iphdr_sz + tcphdr_sz];
+ __u8 iph_buffer_udp[iphdr_sz + udphdr_sz];
+ struct bpf_dynptr new_xdp_ptr;
+ struct iptnl_info *tnl;
+ struct ethhdr *new_eth;
+ struct ethhdr *old_eth;
+ struct iphdr *iph;
+ __u16 *next_iph;
+ __u16 payload_len;
+ struct vip vip = {};
+ int dport;
+ __u32 csum = 0;
+ int i;
+
+ __builtin_memset(eth_buffer, 0, sizeof(eth_buffer));
+ __builtin_memset(iph_buffer_tcp, 0, sizeof(iph_buffer_tcp));
+ __builtin_memset(iph_buffer_udp, 0, sizeof(iph_buffer_udp));
+
+ if (ethhdr_sz + iphdr_sz + tcphdr_sz > xdp->data_end - xdp->data)
+ iph = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, iph_buffer_udp, sizeof(iph_buffer_udp));
+ else
+ iph = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, iph_buffer_tcp, sizeof(iph_buffer_tcp));
+
+ if (!iph)
+ return XDP_DROP;
+
+ dport = get_dport(iph + 1, iph->protocol);
+ if (dport == -1)
+ return XDP_DROP;
+
+ vip.protocol = iph->protocol;
+ vip.family = AF_INET;
+ vip.daddr.v4 = iph->daddr;
+ vip.dport = dport;
+ payload_len = bpf_ntohs(iph->tot_len);
+
+ tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
+ /* It only does v4-in-v4 */
+ if (!tnl || tnl->family != AF_INET)
+ return XDP_PASS;
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)iphdr_sz))
+ return XDP_DROP;
+
+ bpf_dynptr_from_xdp(xdp, 0, &new_xdp_ptr);
+ new_eth = bpf_dynptr_slice_rdwr(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer));
+ if (!new_eth)
+ return XDP_DROP;
+
+ iph = (struct iphdr *)(new_eth + 1);
+ old_eth = (struct ethhdr *)(iph + 1);
+
+ set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IP));
+
+ if (new_eth == eth_buffer)
+ bpf_dynptr_write(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer), 0);
+
+ iph->version = 4;
+ iph->ihl = iphdr_sz >> 2;
+ iph->frag_off = 0;
+ iph->protocol = IPPROTO_IPIP;
+ iph->check = 0;
+ iph->tos = 0;
+ iph->tot_len = bpf_htons(payload_len + iphdr_sz);
+ iph->daddr = tnl->daddr.v4;
+ iph->saddr = tnl->saddr.v4;
+ iph->ttl = 8;
+
+ next_iph = (__u16 *)iph;
+ for (i = 0; i < iphdr_sz >> 1; i++)
+ csum += *next_iph++;
+
+ iph->check = ~((csum & 0xffff) + (csum >> 16));
+
+ count_tx(vip.protocol);
+
+ return XDP_TX;
+}
+
+static __always_inline int handle_ipv6(struct xdp_md *xdp, struct bpf_dynptr *xdp_ptr)
+{
+ __u8 eth_buffer[ethhdr_sz + ipv6hdr_sz + ethhdr_sz];
+ __u8 ip6h_buffer_tcp[ipv6hdr_sz + tcphdr_sz];
+ __u8 ip6h_buffer_udp[ipv6hdr_sz + udphdr_sz];
+ struct bpf_dynptr new_xdp_ptr;
+ struct iptnl_info *tnl;
+ struct ethhdr *new_eth;
+ struct ethhdr *old_eth;
+ struct ipv6hdr *ip6h;
+ __u16 payload_len;
+ struct vip vip = {};
+ int dport;
+
+ __builtin_memset(eth_buffer, 0, sizeof(eth_buffer));
+ __builtin_memset(ip6h_buffer_tcp, 0, sizeof(ip6h_buffer_tcp));
+ __builtin_memset(ip6h_buffer_udp, 0, sizeof(ip6h_buffer_udp));
+
+ if (ethhdr_sz + iphdr_sz + tcphdr_sz > xdp->data_end - xdp->data)
+ ip6h = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, ip6h_buffer_udp, sizeof(ip6h_buffer_udp));
+ else
+ ip6h = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, ip6h_buffer_tcp, sizeof(ip6h_buffer_tcp));
+
+ if (!ip6h)
+ return XDP_DROP;
+
+ dport = get_dport(ip6h + 1, ip6h->nexthdr);
+ if (dport == -1)
+ return XDP_DROP;
+
+ vip.protocol = ip6h->nexthdr;
+ vip.family = AF_INET6;
+ memcpy(vip.daddr.v6, ip6h->daddr.s6_addr32, sizeof(vip.daddr));
+ vip.dport = dport;
+ payload_len = ip6h->payload_len;
+
+ tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
+ /* It only does v6-in-v6 */
+ if (!tnl || tnl->family != AF_INET6)
+ return XDP_PASS;
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)ipv6hdr_sz))
+ return XDP_DROP;
+
+ bpf_dynptr_from_xdp(xdp, 0, &new_xdp_ptr);
+ new_eth = bpf_dynptr_slice_rdwr(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer));
+ if (!new_eth)
+ return XDP_DROP;
+
+ ip6h = (struct ipv6hdr *)(new_eth + 1);
+ old_eth = (struct ethhdr *)(ip6h + 1);
+
+ set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IPV6));
+
+ if (new_eth == eth_buffer)
+ bpf_dynptr_write(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer), 0);
+
+ ip6h->version = 6;
+ ip6h->priority = 0;
+ memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
+ ip6h->payload_len = bpf_htons(bpf_ntohs(payload_len) + ipv6hdr_sz);
+ ip6h->nexthdr = IPPROTO_IPV6;
+ ip6h->hop_limit = 8;
+ memcpy(ip6h->saddr.s6_addr32, tnl->saddr.v6, sizeof(tnl->saddr.v6));
+ memcpy(ip6h->daddr.s6_addr32, tnl->daddr.v6, sizeof(tnl->daddr.v6));
+
+ count_tx(vip.protocol);
+
+ return XDP_TX;
+}
+
+SEC("xdp")
+int _xdp_tx_iptunnel(struct xdp_md *xdp)
+{
+ __u8 buffer[ethhdr_sz];
+ struct bpf_dynptr ptr;
+ struct ethhdr *eth;
+ __u16 h_proto;
+
+ __builtin_memset(buffer, 0, sizeof(buffer));
+
+ bpf_dynptr_from_xdp(xdp, 0, &ptr);
+ eth = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
+ if (!eth)
+ return XDP_DROP;
+
+ h_proto = eth->h_proto;
+
+ if (h_proto == bpf_htons(ETH_P_IP))
+ return handle_ipv4(xdp, &ptr);
+ else if (h_proto == bpf_htons(ETH_P_IPV6))
+ return handle_ipv6(xdp, &ptr);
+ else
+ return XDP_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
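bpf_dynptr_slice_rdwr() either returns a pointer straight into the packet (when the requested range is linear) or copies the range into the caller's buffer; that is why the handlers above write eth_buffer back with bpf_dynptr_write() only when the returned pointer equals the buffer. A compressed sketch of that read-modify-write pattern, assuming the same headers as this file (the ETH/IP definitions plus bpf_kfuncs.h); the function is illustrative, not part of the patch:

SEC("xdp")
int dynptr_rmw_example(struct xdp_md *xdp)
{
	__u8 buf[sizeof(struct ethhdr)] = {};
	struct bpf_dynptr ptr;
	struct ethhdr *eth;

	bpf_dynptr_from_xdp(xdp, 0, &ptr);
	eth = bpf_dynptr_slice_rdwr(&ptr, 0, buf, sizeof(buf));
	if (!eth)
		return XDP_DROP;

	eth->h_proto = bpf_htons(ETH_P_IP);

	/* the slice was copied into buf (non-linear area): write it back */
	if ((void *)eth == (void *)buf)
		bpf_dynptr_write(&ptr, 0, buf, sizeof(buf), 0);

	return XDP_PASS;
}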
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_link.c b/tools/testing/selftests/bpf/progs/test_xdp_link.c
index eb93ea95d1d8..64ff32eaae92 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_link.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_link.c
@@ -5,8 +5,14 @@
char LICENSE[] SEC("license") = "GPL";
-SEC("xdp/handler")
+SEC("xdp")
int xdp_handler(struct xdp_md *xdp)
{
return 0;
}
+
+SEC("tc")
+int tc_handler(struct __sk_buff *skb)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_loop.c b/tools/testing/selftests/bpf/progs/test_xdp_loop.c
index fcabcda30ba3..93267a68825b 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_loop.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_loop.c
@@ -15,8 +15,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "test_iptunnel_common.h"
-
-int _version SEC("version") = 1;
+#include "bpf_compiler.h"
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
@@ -135,7 +134,7 @@ static __always_inline int handle_ipv4(struct xdp_md *xdp)
iph->ttl = 8;
next_iph = (__u16 *)iph;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < sizeof(*iph) >> 1; i++)
csum += *next_iph++;
@@ -206,7 +205,7 @@ static __always_inline int handle_ipv6(struct xdp_md *xdp)
return XDP_TX;
}
-SEC("xdp_tx_iptunnel")
+SEC("xdp")
int _xdp_tx_iptunnel(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_meta.c b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
index a7c4a7d49fe6..fcf6ca14f2ea 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_meta.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
@@ -4,37 +4,50 @@
#include <bpf/bpf_helpers.h>
-#define __round_mask(x, y) ((__typeof__(x))((y) - 1))
-#define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1)
+#define META_SIZE 32
+
#define ctx_ptr(ctx, mem) (void *)(unsigned long)ctx->mem
-SEC("t")
+/* Demonstrates how metadata can be passed from an XDP program to a TC program
+ * using bpf_xdp_adjust_meta.
+ * For the sake of testing the metadata support in drivers, the XDP program uses
+ * a fixed-size payload after the Ethernet header as metadata. The TC program
+ * copies the metadata it receives into a map so it can be checked from
+ * userspace.
+ */
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __uint(value_size, META_SIZE);
+} test_result SEC(".maps");
+
+SEC("tc")
int ing_cls(struct __sk_buff *ctx)
{
- __u8 *data, *data_meta, *data_end;
- __u32 diff = 0;
+ __u8 *data, *data_meta;
+ __u32 key = 0;
data_meta = ctx_ptr(ctx, data_meta);
- data_end = ctx_ptr(ctx, data_end);
data = ctx_ptr(ctx, data);
- if (data + ETH_ALEN > data_end ||
- data_meta + round_up(ETH_ALEN, 4) > data)
+ if (data_meta + META_SIZE > data)
return TC_ACT_SHOT;
- diff |= ((__u32 *)data_meta)[0] ^ ((__u32 *)data)[0];
- diff |= ((__u16 *)data_meta)[2] ^ ((__u16 *)data)[2];
+ bpf_map_update_elem(&test_result, &key, data_meta, BPF_ANY);
- return diff ? TC_ACT_SHOT : TC_ACT_OK;
+ return TC_ACT_SHOT;
}
-SEC("x")
+SEC("xdp")
int ing_xdp(struct xdp_md *ctx)
{
- __u8 *data, *data_meta, *data_end;
+ __u8 *data, *data_meta, *data_end, *payload;
+ struct ethhdr *eth;
int ret;
- ret = bpf_xdp_adjust_meta(ctx, -round_up(ETH_ALEN, 4));
+ ret = bpf_xdp_adjust_meta(ctx, -META_SIZE);
if (ret < 0)
return XDP_DROP;
@@ -42,11 +55,21 @@ int ing_xdp(struct xdp_md *ctx)
data_end = ctx_ptr(ctx, data_end);
data = ctx_ptr(ctx, data);
- if (data + ETH_ALEN > data_end ||
- data_meta + round_up(ETH_ALEN, 4) > data)
+ eth = (struct ethhdr *)data;
+ payload = data + sizeof(struct ethhdr);
+
+ if (payload + META_SIZE > data_end ||
+ data_meta + META_SIZE > data)
+ return XDP_DROP;
+
+ /* The Linux networking stack may send other packets on the test
+ * interface that interfere with the test. Just drop them.
+ * The test packets can be recognized by their ethertype of zero.
+ */
+ if (eth->h_proto != 0)
return XDP_DROP;
- __builtin_memcpy(data_meta, data, ETH_ALEN);
+ __builtin_memcpy(data_meta, payload, META_SIZE);
return XDP_PASS;
}
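Userspace can then confirm the round trip by comparing slot 0 of test_result against the payload it sent. A hypothetical check, assuming the map fd comes from an already-loaded object and META_SIZE stays at 32:

#include <string.h>
#include <bpf/bpf.h>

static int check_meta(int test_result_fd, const void *expected_payload)
{
	__u8 got[32];		/* META_SIZE */
	__u32 key = 0;

	if (bpf_map_lookup_elem(test_result_fd, &key, got))
		return -1;

	return memcmp(got, expected_payload, sizeof(got)) ? -1 : 0;
}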
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
index 3a67921f62b5..fad94e41cef9 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
@@ -15,6 +15,7 @@
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
static __always_inline __u32 rol32(__u32 word, unsigned int shift)
{
@@ -239,7 +240,7 @@ bool parse_udp(void *data, void *data_end,
udp = data + off;
if (udp + 1 > data_end)
- return 0;
+ return false;
if (!is_icmp) {
pckt->flow.port16[0] = udp->source;
pckt->flow.port16[1] = udp->dest;
@@ -247,7 +248,7 @@ bool parse_udp(void *data, void *data_end,
pckt->flow.port16[0] = udp->dest;
pckt->flow.port16[1] = udp->source;
}
- return 1;
+ return true;
}
static __attribute__ ((noinline))
@@ -261,7 +262,7 @@ bool parse_tcp(void *data, void *data_end,
tcp = data + off;
if (tcp + 1 > data_end)
- return 0;
+ return false;
if (tcp->syn)
pckt->flags |= (1 << 1);
if (!is_icmp) {
@@ -271,7 +272,7 @@ bool parse_tcp(void *data, void *data_end,
pckt->flow.port16[0] = tcp->dest;
pckt->flow.port16[1] = tcp->source;
}
- return 1;
+ return true;
}
static __attribute__ ((noinline))
@@ -287,7 +288,7 @@ bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
void *data;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
- return 0;
+ return false;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
new_eth = data;
@@ -295,7 +296,7 @@ bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
old_eth = data + sizeof(struct ipv6hdr);
if (new_eth + 1 > data_end ||
old_eth + 1 > data_end || ip6h + 1 > data_end)
- return 0;
+ return false;
memcpy(new_eth->eth_dest, cval->mac, 6);
memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
new_eth->eth_proto = 56710;
@@ -314,9 +315,17 @@ bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
ip6h->saddr.in6_u.u6_addr32[2] = 3;
ip6h->saddr.in6_u.u6_addr32[3] = ip_suffix;
memcpy(ip6h->daddr.in6_u.u6_addr32, dst->dstv6, 16);
- return 1;
+ return true;
}
+#ifndef __clang__
+#pragma GCC push_options
+/* GCC optimization collapses functions and increases the number of arguments
+ * beyond the compatible amount supported by BPF.
+ */
+#pragma GCC optimize("-fno-ipa-sra")
+#endif
+
static __attribute__ ((noinline))
bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
struct packet_description *pckt,
@@ -335,7 +344,7 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
ip_suffix <<= 15;
ip_suffix ^= pckt->flow.src;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
- return 0;
+ return false;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
new_eth = data;
@@ -343,7 +352,7 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
old_eth = data + sizeof(struct iphdr);
if (new_eth + 1 > data_end ||
old_eth + 1 > data_end || iph + 1 > data_end)
- return 0;
+ return false;
memcpy(new_eth->eth_dest, cval->mac, 6);
memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
new_eth->eth_proto = 8;
@@ -362,53 +371,18 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
iph->ttl = 4;
next_iph_u16 = (__u16 *) iph;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
csum += *next_iph_u16++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
- return 0;
- return 1;
+ return false;
+ return true;
}
-static __attribute__ ((noinline))
-bool decap_v6(struct xdp_md *xdp, void **data, void **data_end, bool inner_v4)
-{
- struct eth_hdr *new_eth;
- struct eth_hdr *old_eth;
-
- old_eth = *data;
- new_eth = *data + sizeof(struct ipv6hdr);
- memcpy(new_eth->eth_source, old_eth->eth_source, 6);
- memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
- if (inner_v4)
- new_eth->eth_proto = 8;
- else
- new_eth->eth_proto = 56710;
- if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct ipv6hdr)))
- return 0;
- *data = (void *)(long)xdp->data;
- *data_end = (void *)(long)xdp->data_end;
- return 1;
-}
-
-static __attribute__ ((noinline))
-bool decap_v4(struct xdp_md *xdp, void **data, void **data_end)
-{
- struct eth_hdr *new_eth;
- struct eth_hdr *old_eth;
-
- old_eth = *data;
- new_eth = *data + sizeof(struct iphdr);
- memcpy(new_eth->eth_source, old_eth->eth_source, 6);
- memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
- new_eth->eth_proto = 8;
- if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
- return 0;
- *data = (void *)(long)xdp->data;
- *data_end = (void *)(long)xdp->data_end;
- return 1;
-}
+#ifndef __clang__
+#pragma GCC pop_options
+#endif
static __attribute__ ((noinline))
int swap_mac_and_send(void *data, void *data_end)
@@ -430,7 +404,6 @@ int send_icmp_reply(void *data, void *data_end)
__u16 *next_iph_u16;
__u32 tmp_addr = 0;
struct iphdr *iph;
- __u32 csum1 = 0;
__u32 csum = 0;
__u64 off = 0;
@@ -449,7 +422,7 @@ int send_icmp_reply(void *data, void *data_end)
iph->saddr = tmp_addr;
iph->check = 0;
next_iph_u16 = (__u16 *) iph;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
csum += *next_iph_u16++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
@@ -564,22 +537,22 @@ static bool get_packet_dst(struct real_definition **real,
hash = get_packet_hash(pckt, hash_16bytes);
if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
- return 0;
+ return false;
key = 2 * vip_info->vip_num + hash % 2;
real_pos = bpf_map_lookup_elem(&ch_rings, &key);
if (!real_pos)
- return 0;
+ return false;
key = *real_pos;
*real = bpf_map_lookup_elem(&reals, &key);
if (!(*real))
- return 0;
+ return false;
if (!(vip_info->flags & (1 << 1))) {
__u32 conn_rate_key = 512 + 2;
struct lb_stats *conn_rate_stats =
bpf_map_lookup_elem(&stats, &conn_rate_key);
if (!conn_rate_stats)
- return 1;
+ return true;
cur_time = bpf_ktime_get_ns();
if ((cur_time - conn_rate_stats->v2) >> 32 > 0xffFFFF) {
conn_rate_stats->v1 = 1;
@@ -587,14 +560,14 @@ static bool get_packet_dst(struct real_definition **real,
} else {
conn_rate_stats->v1 += 1;
if (conn_rate_stats->v1 >= 1)
- return 1;
+ return true;
}
if (pckt->flow.proto == IPPROTO_UDP)
new_dst_lru.atime = cur_time;
new_dst_lru.pos = key;
bpf_map_update_elem(lru_map, &pckt->flow, &new_dst_lru, 0);
}
- return 1;
+ return true;
}
__attribute__ ((noinline))
@@ -627,12 +600,13 @@ static void connection_table_lookup(struct real_definition **real,
__attribute__ ((noinline))
static int process_l3_headers_v6(struct packet_description *pckt,
__u8 *protocol, __u64 off,
- __u16 *pkt_bytes, void *data,
- void *data_end)
+ __u16 *pkt_bytes, void *extra_args[2])
{
struct ipv6hdr *ip6h;
__u64 iph_len;
int action;
+ void *data = extra_args[0];
+ void *data_end = extra_args[1];
ip6h = data + off;
if (ip6h + 1 > data_end)
@@ -658,12 +632,12 @@ static int process_l3_headers_v6(struct packet_description *pckt,
__attribute__ ((noinline))
static int process_l3_headers_v4(struct packet_description *pckt,
__u8 *protocol, __u64 off,
- __u16 *pkt_bytes, void *data,
- void *data_end)
+ __u16 *pkt_bytes, void *extra_args[2])
{
struct iphdr *iph;
- __u64 iph_len;
int action;
+ void *data = extra_args[0];
+ void *data_end = extra_args[1];
iph = data + off;
if (iph + 1 > data_end)
@@ -696,7 +670,6 @@ static int process_packet(void *data, __u64 off, void *data_end,
struct packet_description pckt = { };
struct vip_definition vip = { };
struct lb_stats *data_stats;
- struct eth_hdr *eth = data;
void *lru_map = &lru_cache;
struct vip_meta *vip_info;
__u32 lru_stats_key = 513;
@@ -704,17 +677,17 @@ static int process_packet(void *data, __u64 off, void *data_end,
__u32 stats_key = 512;
struct ctl_value *cval;
__u16 pkt_bytes;
- __u64 iph_len;
__u8 protocol;
__u32 vip_num;
int action;
+ void *extra_args[2] = { data, data_end };
if (is_ipv6)
action = process_l3_headers_v6(&pckt, &protocol, off,
- &pkt_bytes, data, data_end);
+ &pkt_bytes, extra_args);
else
action = process_l3_headers_v4(&pckt, &protocol, off,
- &pkt_bytes, data, data_end);
+ &pkt_bytes, extra_args);
if (action >= 0)
return action;
protocol = pckt.flow.proto;
@@ -797,7 +770,7 @@ out:
return XDP_DROP;
}
-SEC("xdp-test-v4")
+SEC("xdp")
int balancer_ingress_v4(struct xdp_md *ctx)
{
void *data = (void *)(long)ctx->data;
@@ -816,7 +789,7 @@ int balancer_ingress_v4(struct xdp_md *ctx)
return XDP_DROP;
}
-SEC("xdp-test-v6")
+SEC("xdp")
int balancer_ingress_v6(struct xdp_md *ctx)
{
void *data = (void *)(long)ctx->data;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c
deleted file mode 100644
index a5337cd9400b..000000000000
--- a/tools/testing/selftests/bpf/progs/test_xdp_redirect.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2017 VMware
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-
-int _version SEC("version") = 1;
-
-SEC("redirect_to_111")
-int xdp_redirect_to_111(struct xdp_md *xdp)
-{
- return bpf_redirect(111, 0);
-}
-SEC("redirect_to_222")
-int xdp_redirect_to_222(struct xdp_md *xdp)
-{
- return bpf_redirect(222, 0);
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_update_frags.c b/tools/testing/selftests/bpf/progs/test_xdp_update_frags.c
new file mode 100644
index 000000000000..2a3496d8e327
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_xdp_update_frags.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <bpf/bpf_helpers.h>
+
+int _version SEC("version") = 1;
+
+SEC("xdp.frags")
+int xdp_adjust_frags(struct xdp_md *xdp)
+{
+ __u8 *data_end = (void *)(long)xdp->data_end;
+ __u8 *data = (void *)(long)xdp->data;
+ __u8 val[16] = {};
+ __u32 offset;
+ int err;
+
+ if (data + sizeof(__u32) > data_end)
+ return XDP_DROP;
+
+ offset = *(__u32 *)data;
+ err = bpf_xdp_load_bytes(xdp, offset, val, sizeof(val));
+ if (err < 0)
+ return XDP_DROP;
+
+ if (val[0] != 0xaa || val[15] != 0xaa) /* marker */
+ return XDP_DROP;
+
+ val[0] = 0xbb; /* update the marker */
+ val[15] = 0xbb;
+ err = bpf_xdp_store_bytes(xdp, offset, val, sizeof(val));
+ if (err < 0)
+ return XDP_DROP;
+
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
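The program reads the target offset out of the first four bytes of the packet, so the harness has to build a multi-frag buffer with that offset and the 0xaa markers already in place. A hypothetical setup helper (sizes and the offset choice are assumptions; a real run would pick an offset that crosses a page boundary inside a 9000-byte packet):

#include <string.h>
#include <linux/types.h>

static void prep_frags_pkt(__u8 *pkt, __u32 pkt_len, __u32 marker_off)
{
	memset(pkt, 0, pkt_len);
	memcpy(pkt, &marker_off, sizeof(marker_off));	/* offset consumed by the prog */
	memset(pkt + marker_off, 0xaa, 16);		/* markers the prog checks and flips to 0xbb */
}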
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c
index 134768f6b788..a80cc5f2f4f2 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c
@@ -98,12 +98,12 @@ bool parse_eth_frame(struct ethhdr *eth, void *data_end, struct parse_pkt *pkt)
return true;
}
-/* Hint, VLANs are choosen to hit network-byte-order issues */
+/* Hint, VLANs are chosen to hit network-byte-order issues */
#define TESTVLAN 4011 /* 0xFAB */
// #define TO_VLAN 4000 /* 0xFA0 (hint 0xOA0 = 160) */
-SEC("xdp_drop_vlan_4011")
-int xdp_prognum0(struct xdp_md *ctx)
+SEC("xdp")
+int xdp_drop_vlan_4011(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
@@ -144,8 +144,8 @@ Load prog with ip tool:
/* Changing VLAN to zero, have same practical effect as removing the VLAN. */
#define TO_VLAN 0
-SEC("xdp_vlan_change")
-int xdp_prognum1(struct xdp_md *ctx)
+SEC("xdp")
+int xdp_vlan_change(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
@@ -160,7 +160,7 @@ int xdp_prognum1(struct xdp_md *ctx)
/* Modifying VLAN, preserve top 4 bits */
vlan_hdr->h_vlan_TCI =
- bpf_htons((bpf_ntohs(vlan_hdr->h_vlan_TCI) & 0xf000)
+ bpf_htons((bpf_ntohs(vlan_hdr->h_vlan_TCI) & 0xf000U)
| TO_VLAN);
}
@@ -178,8 +178,8 @@ int xdp_prognum1(struct xdp_md *ctx)
#endif
#define VLAN_HDR_SZ 4 /* bytes */
-SEC("xdp_vlan_remove_outer")
-int xdp_prognum2(struct xdp_md *ctx)
+SEC("xdp")
+int xdp_vlan_remove_outer(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
@@ -195,7 +195,7 @@ int xdp_prognum2(struct xdp_md *ctx)
/* Moving Ethernet header, dest overlap with src, memmove handle this */
dest = data;
- dest+= VLAN_HDR_SZ;
+ dest += VLAN_HDR_SZ;
/*
* Notice: Taking over vlan_hdr->h_vlan_encapsulated_proto, by
* only moving two MAC addrs (12 bytes), not overwriting last 2 bytes
@@ -210,19 +210,6 @@ int xdp_prognum2(struct xdp_md *ctx)
}
static __always_inline
-void shift_mac_4bytes_16bit(void *data)
-{
- __u16 *p = data;
-
- p[7] = p[5]; /* delete p[7] was vlan_hdr->h_vlan_TCI */
- p[6] = p[4]; /* delete p[6] was ethhdr->h_proto */
- p[5] = p[3];
- p[4] = p[2];
- p[3] = p[1];
- p[2] = p[0];
-}
-
-static __always_inline
void shift_mac_4bytes_32bit(void *data)
{
__u32 *p = data;
@@ -237,8 +224,8 @@ void shift_mac_4bytes_32bit(void *data)
p[1] = p[0];
}
-SEC("xdp_vlan_remove_outer2")
-int xdp_prognum3(struct xdp_md *ctx)
+SEC("xdp")
+int xdp_vlan_remove_outer2(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
@@ -267,8 +254,8 @@ int xdp_prognum3(struct xdp_md *ctx)
* The TC-clsact eBPF programs (currently) need to be attach via TC commands
*/
-SEC("tc_vlan_push")
-int _tc_progA(struct __sk_buff *ctx)
+SEC("tc")
+int tc_vlan_push(struct __sk_buff *ctx)
{
bpf_skb_vlan_push(ctx, bpf_htons(ETH_P_8021Q), TESTVLAN);
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_frags_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_frags_helpers.c
new file mode 100644
index 000000000000..97ed625bb70a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_frags_helpers.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+#define IFINDEX_LO 1
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CPUMAP);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_cpumap_val));
+ __uint(max_entries, 4);
+} cpu_map SEC(".maps");
+
+SEC("xdp/cpumap")
+int xdp_dummy_cm(struct xdp_md *ctx)
+{
+ return XDP_PASS;
+}
+
+SEC("xdp.frags/cpumap")
+int xdp_dummy_cm_frags(struct xdp_md *ctx)
+{
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
index 59ee4f182ff8..3619239b01b7 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
@@ -12,25 +12,36 @@ struct {
__uint(max_entries, 4);
} cpu_map SEC(".maps");
-SEC("xdp_redir")
+__u32 redirect_count = 0;
+
+SEC("xdp")
int xdp_redir_prog(struct xdp_md *ctx)
{
- return bpf_redirect_map(&cpu_map, 1, 0);
+ return bpf_redirect_map(&cpu_map, 0, 0);
}
-SEC("xdp_dummy")
+SEC("xdp")
int xdp_dummy_prog(struct xdp_md *ctx)
{
return XDP_PASS;
}
-SEC("xdp_cpumap/dummy_cm")
+SEC("xdp/cpumap")
int xdp_dummy_cm(struct xdp_md *ctx)
{
+ if (bpf_get_smp_processor_id() == 0)
+ redirect_count++;
+
if (ctx->ingress_ifindex == IFINDEX_LO)
return XDP_DROP;
return XDP_PASS;
}
+SEC("xdp.frags/cpumap")
+int xdp_dummy_cm_frags(struct xdp_md *ctx)
+{
+ return XDP_PASS;
+}
+
char _license[] SEC("license") = "GPL";
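For the xdp/cpumap entries to ever run, userspace has to install them into the cpu_map values via struct bpf_cpumap_val. A hedged sketch of that step (queue size and CPU index are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf.h>

static int set_cpumap_prog(int cpu_map_fd, __u32 cpu, int cpumap_prog_fd)
{
	struct bpf_cpumap_val val = {
		.qsize = 192,			/* assumed per-CPU queue size */
		.bpf_prog.fd = cpumap_prog_fd,	/* fd of xdp_dummy_cm or the frags variant */
	};

	return bpf_map_update_elem(cpu_map_fd, &cpu, &val, BPF_ANY);
}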
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_frags_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_frags_helpers.c
new file mode 100644
index 000000000000..cdcf7de7ec8c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_frags_helpers.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_devmap_val));
+ __uint(max_entries, 4);
+} dm_ports SEC(".maps");
+
+/* valid program on DEVMAP entry via SEC name;
+ * has access to egress and ingress ifindex
+ */
+SEC("xdp/devmap")
+int xdp_dummy_dm(struct xdp_md *ctx)
+{
+ return XDP_PASS;
+}
+
+SEC("xdp.frags/devmap")
+int xdp_dummy_dm_frags(struct xdp_md *ctx)
+{
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
index 0ac086497722..92b65a485d4a 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
@@ -9,16 +9,16 @@ struct {
__uint(max_entries, 4);
} dm_ports SEC(".maps");
-SEC("xdp_redir")
+SEC("xdp")
int xdp_redir_prog(struct xdp_md *ctx)
{
- return bpf_redirect_map(&dm_ports, 1, 0);
+ return bpf_redirect_map(&dm_ports, 0, 0);
}
/* invalid program on DEVMAP entry;
* SEC name means expected attach type not set
*/
-SEC("xdp_dummy")
+SEC("xdp")
int xdp_dummy_prog(struct xdp_md *ctx)
{
return XDP_PASS;
@@ -27,7 +27,7 @@ int xdp_dummy_prog(struct xdp_md *ctx)
/* valid program on DEVMAP entry via SEC name;
* has access to egress and ingress ifindex
*/
-SEC("xdp_devmap/map_prog")
+SEC("xdp/devmap")
int xdp_dummy_dm(struct xdp_md *ctx)
{
char fmt[] = "devmap redirect: dev %u -> dev %u len %u\n";
@@ -40,4 +40,11 @@ int xdp_dummy_dm(struct xdp_md *ctx)
return XDP_PASS;
}
+
+SEC("xdp.frags/devmap")
+int xdp_dummy_dm_frags(struct xdp_md *ctx)
+{
+ return XDP_PASS;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/timer.c b/tools/testing/selftests/bpf/progs/timer.c
new file mode 100644
index 000000000000..4c677c001258
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/timer.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <linux/bpf.h>
+#include <time.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+struct hmap_elem {
+ int counter;
+ struct bpf_timer timer;
+ struct bpf_spin_lock lock; /* unused */
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1000);
+ __type(key, int);
+ __type(value, struct hmap_elem);
+} hmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __uint(max_entries, 1000);
+ __type(key, int);
+ __type(value, struct hmap_elem);
+} hmap_malloc SEC(".maps");
+
+struct elem {
+ struct bpf_timer t;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, int);
+ __type(value, struct elem);
+} array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, 4);
+ __type(key, int);
+ __type(value, struct elem);
+} lru SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps"),
+ race_array SEC(".maps");
+
+__u64 bss_data;
+__u64 abs_data;
+__u64 err;
+__u64 ok;
+__u64 callback_check = 52;
+__u64 callback2_check = 52;
+__u64 pinned_callback_check;
+__s32 pinned_cpu;
+
+#define ARRAY 1
+#define HTAB 2
+#define HTAB_MALLOC 3
+#define LRU 4
+
+/* callback for array and lru timers */
+static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
+{
+ /* increment bss variable twice.
+ * Once via array timer callback and once via lru timer callback
+ */
+ bss_data += 5;
+
+ /* *key == ARRAY (1) - the callback was called for the array timer.
+ * *key == LRU (4) - the callback was called from the lru timer.
+ */
+ if (*key == ARRAY) {
+ struct bpf_timer *lru_timer;
+ int lru_key = LRU;
+
+ /* rearm array timer to be called again in ~35 seconds */
+ if (bpf_timer_start(timer, 1ull << 35, 0) != 0)
+ err |= 1;
+
+ lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
+ if (!lru_timer)
+ return 0;
+ bpf_timer_set_callback(lru_timer, timer_cb1);
+ if (bpf_timer_start(lru_timer, 0, 0) != 0)
+ err |= 2;
+ } else if (*key == LRU) {
+ int lru_key, i;
+
+ for (i = LRU + 1;
+ i <= 100 /* for current LRU eviction algorithm this number
+ * should be larger than ~ lru->max_entries * 2
+ */;
+ i++) {
+ struct elem init = {};
+
+ /* lru_key cannot be used as loop induction variable
+ * otherwise the loop will be unbounded.
+ */
+ lru_key = i;
+
+ /* add more elements into lru map to push out current
+ * element and force deletion of this timer
+ */
+ bpf_map_update_elem(map, &lru_key, &init, 0);
+ /* look it up to bump it into active list */
+ bpf_map_lookup_elem(map, &lru_key);
+
+ /* keep adding until *key changes underneath,
+ * which means that key/timer memory was reused
+ */
+ if (*key != LRU)
+ break;
+ }
+
+ /* check that the timer was removed */
+ if (bpf_timer_cancel(timer) != -EINVAL)
+ err |= 4;
+ ok |= 1;
+ }
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG2(test1, int, a)
+{
+ struct bpf_timer *arr_timer, *lru_timer;
+ struct elem init = {};
+ int lru_key = LRU;
+ int array_key = ARRAY;
+
+ arr_timer = bpf_map_lookup_elem(&array, &array_key);
+ if (!arr_timer)
+ return 0;
+ bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);
+
+ bpf_map_update_elem(&lru, &lru_key, &init, 0);
+ lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
+ if (!lru_timer)
+ return 0;
+ bpf_timer_init(lru_timer, &lru, CLOCK_MONOTONIC);
+
+ bpf_timer_set_callback(arr_timer, timer_cb1);
+ bpf_timer_start(arr_timer, 0 /* call timer_cb1 asap */, 0);
+
+ /* init more timers to check that array destruction
+ * doesn't leak timer memory.
+ */
+ array_key = 0;
+ arr_timer = bpf_map_lookup_elem(&array, &array_key);
+ if (!arr_timer)
+ return 0;
+ bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);
+ return 0;
+}
+
+/* callback for prealloc and non-prealloc hashtab timers */
+static int timer_cb2(void *map, int *key, struct hmap_elem *val)
+{
+ if (*key == HTAB)
+ callback_check--;
+ else
+ callback2_check--;
+ if (val->counter > 0 && --val->counter) {
+ /* re-arm the timer again to execute after 1 usec */
+ bpf_timer_start(&val->timer, 1000, 0);
+ } else if (*key == HTAB) {
+ struct bpf_timer *arr_timer;
+ int array_key = ARRAY;
+
+ /* cancel arr_timer otherwise bpf_fentry_test1 prog
+ * will stay alive forever.
+ */
+ arr_timer = bpf_map_lookup_elem(&array, &array_key);
+ if (!arr_timer)
+ return 0;
+ if (bpf_timer_cancel(arr_timer) != 1)
+ /* bpf_timer_cancel should return 1 to indicate
+ * that arr_timer was active at this time
+ */
+ err |= 8;
+
+ /* try to cancel ourself. It shouldn't deadlock. */
+ if (bpf_timer_cancel(&val->timer) != -EDEADLK)
+ err |= 16;
+
+ /* delete this key and this timer anyway.
+ * It shouldn't deadlock either.
+ */
+ bpf_map_delete_elem(map, key);
+
+ /* in preallocated hashmap both 'key' and 'val' could have been
+ * reused to store another map element (like in LRU above),
+ * but in controlled test environment the below test works.
+ * It's not a use-after-free. The memory is owned by the map.
+ */
+ if (bpf_timer_start(&val->timer, 1000, 0) != -EINVAL)
+ err |= 32;
+ ok |= 2;
+ } else {
+ if (*key != HTAB_MALLOC)
+ err |= 64;
+
+ /* try to cancel ourself. It shouldn't deadlock. */
+ if (bpf_timer_cancel(&val->timer) != -EDEADLK)
+ err |= 128;
+
+ /* delete this key and this timer anyway.
+ * It shouldn't deadlock either.
+ */
+ bpf_map_delete_elem(map, key);
+
+ ok |= 4;
+ }
+ return 0;
+}
+
+int bpf_timer_test(void)
+{
+ struct hmap_elem *val;
+ int key = HTAB, key_malloc = HTAB_MALLOC;
+
+ val = bpf_map_lookup_elem(&hmap, &key);
+ if (val) {
+ if (bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME) != 0)
+ err |= 512;
+ bpf_timer_set_callback(&val->timer, timer_cb2);
+ bpf_timer_start(&val->timer, 1000, 0);
+ }
+ val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
+ if (val) {
+ if (bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME) != 0)
+ err |= 1024;
+ bpf_timer_set_callback(&val->timer, timer_cb2);
+ bpf_timer_start(&val->timer, 1000, 0);
+ }
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test2")
+int BPF_PROG2(test2, int, a, int, b)
+{
+ struct hmap_elem init = {}, *val;
+ int key = HTAB, key_malloc = HTAB_MALLOC;
+
+ init.counter = 10; /* number of times to trigger timer_cb2 */
+ bpf_map_update_elem(&hmap, &key, &init, 0);
+ val = bpf_map_lookup_elem(&hmap, &key);
+ if (val)
+ bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
+ /* update the same key to free the timer */
+ bpf_map_update_elem(&hmap, &key, &init, 0);
+
+ bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
+ val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
+ if (val)
+ bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
+ /* update the same key to free the timer */
+ bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
+
+ /* init more timers to check that htab operations
+ * don't leak timer memory.
+ */
+ key = 0;
+ bpf_map_update_elem(&hmap, &key, &init, 0);
+ val = bpf_map_lookup_elem(&hmap, &key);
+ if (val)
+ bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
+ bpf_map_delete_elem(&hmap, &key);
+ bpf_map_update_elem(&hmap, &key, &init, 0);
+ val = bpf_map_lookup_elem(&hmap, &key);
+ if (val)
+ bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
+
+ /* and with non-prealloc htab */
+ key_malloc = 0;
+ bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
+ val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
+ if (val)
+ bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
+ bpf_map_delete_elem(&hmap_malloc, &key_malloc);
+ bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
+ val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
+ if (val)
+ bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
+
+ return bpf_timer_test();
+}
+
+/* callback for absolute timer */
+static int timer_cb3(void *map, int *key, struct bpf_timer *timer)
+{
+ abs_data += 6;
+
+ if (abs_data < 12) {
+ bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
+ BPF_F_TIMER_ABS);
+ } else {
+ /* Re-arm timer ~35 seconds in future */
+ bpf_timer_start(timer, bpf_ktime_get_boot_ns() + (1ull << 35),
+ BPF_F_TIMER_ABS);
+ }
+
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test3")
+int BPF_PROG2(test3, int, a)
+{
+ int key = 0;
+ struct bpf_timer *timer;
+
+ bpf_printk("test3");
+
+ timer = bpf_map_lookup_elem(&abs_timer, &key);
+ if (timer) {
+ if (bpf_timer_init(timer, &abs_timer, CLOCK_BOOTTIME) != 0)
+ err |= 2048;
+ bpf_timer_set_callback(timer, timer_cb3);
+ bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
+ BPF_F_TIMER_ABS);
+ }
+
+ return 0;
+}
+
+/* callback for pinned timer */
+static int timer_cb_pinned(void *map, int *key, struct bpf_timer *timer)
+{
+ __s32 cpu = bpf_get_smp_processor_id();
+
+ if (cpu != pinned_cpu)
+ err |= 16384;
+
+ pinned_callback_check++;
+ return 0;
+}
+
+static void test_pinned_timer(bool soft)
+{
+ int key = 0;
+ void *map;
+ struct bpf_timer *timer;
+ __u64 flags = BPF_F_TIMER_CPU_PIN;
+ __u64 start_time;
+
+ if (soft) {
+ map = &soft_timer_pinned;
+ start_time = 0;
+ } else {
+ map = &abs_timer_pinned;
+ start_time = bpf_ktime_get_boot_ns();
+ flags |= BPF_F_TIMER_ABS;
+ }
+
+ timer = bpf_map_lookup_elem(map, &key);
+ if (timer) {
+ if (bpf_timer_init(timer, map, CLOCK_BOOTTIME) != 0)
+ err |= 4096;
+ bpf_timer_set_callback(timer, timer_cb_pinned);
+ pinned_cpu = bpf_get_smp_processor_id();
+ bpf_timer_start(timer, start_time + 1000, flags);
+ } else {
+ err |= 8192;
+ }
+}
+
+SEC("fentry/bpf_fentry_test4")
+int BPF_PROG2(test4, int, a)
+{
+ bpf_printk("test4");
+ test_pinned_timer(true);
+
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test5")
+int BPF_PROG2(test5, int, a)
+{
+ bpf_printk("test5");
+ test_pinned_timer(false);
+
+ return 0;
+}
+
+static int race_timer_callback(void *race_array, int *race_key, struct bpf_timer *timer)
+{
+ bpf_timer_start(timer, 1000000, 0);
+ return 0;
+}
+
+SEC("syscall")
+int race(void *ctx)
+{
+ struct bpf_timer *timer;
+ int err, race_key = 0;
+ struct elem init;
+
+ __builtin_memset(&init, 0, sizeof(struct elem));
+ bpf_map_update_elem(&race_array, &race_key, &init, BPF_ANY);
+
+ timer = bpf_map_lookup_elem(&race_array, &race_key);
+ if (!timer)
+ return 1;
+
+ err = bpf_timer_init(timer, &race_array, CLOCK_MONOTONIC);
+ if (err && err != -EBUSY)
+ return 1;
+
+ bpf_timer_set_callback(timer, race_timer_callback);
+ bpf_timer_start(timer, 0, 0);
+ bpf_timer_cancel(timer);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/timer_crash.c b/tools/testing/selftests/bpf/progs/timer_crash.c
new file mode 100644
index 000000000000..f8f7944e70da
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/timer_crash.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct map_elem {
+ struct bpf_timer timer;
+ struct bpf_spin_lock lock;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct map_elem);
+} amap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct map_elem);
+} hmap SEC(".maps");
+
+int pid = 0;
+int crash_map = 0; /* 0 for amap, 1 for hmap */
+
+SEC("fentry/do_nanosleep")
+int sys_enter(void *ctx)
+{
+ struct map_elem *e, value = {};
+ void *map = crash_map ? (void *)&hmap : (void *)&amap;
+
+ if (bpf_get_current_task_btf()->tgid != pid)
+ return 0;
+
+ *(void **)&value = (void *)0xdeadcaf3;
+
+ bpf_map_update_elem(map, &(int){0}, &value, 0);
+ /* For an array map, bpf_map_update_elem() does a
+ * check_and_free_timer_in_array(), which triggers the crash if the timer
+ * pointer was overwritten; for the hash map we need to use bpf_timer_cancel().
+ */
+ if (crash_map == 1) {
+ e = bpf_map_lookup_elem(map, &(int){0});
+ if (!e)
+ return 0;
+ bpf_timer_cancel(&e->timer);
+ }
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/timer_failure.c b/tools/testing/selftests/bpf/progs/timer_failure.c
new file mode 100644
index 000000000000..5a2e9dabf1c6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/timer_failure.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <time.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct elem {
+ struct bpf_timer t;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} timer_map SEC(".maps");
+
+__naked __noinline __used
+static unsigned long timer_cb_ret_bad()
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 s> 1000 goto 1f;"
+ "r0 = 0;"
+ "1:"
+ "goto +0;" /* checkpoint */
+ /* async callback is expected to return 0, so branch above
+ * skipping r0 = 0; should lead to a failure, but if exit
+ * instruction doesn't enforce r0's precision, this callback
+ * will be successfully verified
+ */
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_common
+ );
+}
+
+SEC("fentry/bpf_fentry_test1")
+__log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure
+/* check that fallthrough code path marks r0 as precise */
+__msg("mark_precise: frame0: regs=r0 stack= before")
+__msg(": (85) call bpf_get_prandom_u32#7") /* anchor message */
+/* check that branch code path marks r0 as precise */
+__msg("mark_precise: frame0: regs=r0 stack= before ") __msg(": (85) call bpf_get_prandom_u32#7")
+__msg("should have been in [0, 0]")
+long BPF_PROG2(test_bad_ret, int, a)
+{
+ int key = 0;
+ struct bpf_timer *timer;
+
+ timer = bpf_map_lookup_elem(&timer_map, &key);
+ if (timer) {
+ bpf_timer_init(timer, &timer_map, CLOCK_BOOTTIME);
+ bpf_timer_set_callback(timer, timer_cb_ret_bad);
+ bpf_timer_start(timer, 1000, 0);
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/timer_lockup.c b/tools/testing/selftests/bpf/progs/timer_lockup.c
new file mode 100644
index 000000000000..3e520133281e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/timer_lockup.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <time.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct elem {
+ struct bpf_timer t;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} timer1_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} timer2_map SEC(".maps");
+
+int timer1_err;
+int timer2_err;
+
+static int timer_cb1(void *map, int *k, struct elem *v)
+{
+ struct bpf_timer *timer;
+ int key = 0;
+
+ timer = bpf_map_lookup_elem(&timer2_map, &key);
+ if (timer)
+ timer2_err = bpf_timer_cancel(timer);
+
+ return 0;
+}
+
+static int timer_cb2(void *map, int *k, struct elem *v)
+{
+ struct bpf_timer *timer;
+ int key = 0;
+
+ timer = bpf_map_lookup_elem(&timer1_map, &key);
+ if (timer)
+ timer1_err = bpf_timer_cancel(timer);
+
+ return 0;
+}
+
+SEC("tc")
+int timer1_prog(void *ctx)
+{
+ struct bpf_timer *timer;
+ int key = 0;
+
+ timer = bpf_map_lookup_elem(&timer1_map, &key);
+ if (timer) {
+ bpf_timer_init(timer, &timer1_map, CLOCK_BOOTTIME);
+ bpf_timer_set_callback(timer, timer_cb1);
+ bpf_timer_start(timer, 1, BPF_F_TIMER_CPU_PIN);
+ }
+
+ return 0;
+}
+
+SEC("tc")
+int timer2_prog(void *ctx)
+{
+ struct bpf_timer *timer;
+ int key = 0;
+
+ timer = bpf_map_lookup_elem(&timer2_map, &key);
+ if (timer) {
+ bpf_timer_init(timer, &timer2_map, CLOCK_BOOTTIME);
+ bpf_timer_set_callback(timer, timer_cb2);
+ bpf_timer_start(timer, 1, BPF_F_TIMER_CPU_PIN);
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/timer_mim.c b/tools/testing/selftests/bpf/progs/timer_mim.c
new file mode 100644
index 000000000000..50ebc3f68522
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/timer_mim.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <linux/bpf.h>
+#include <time.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+struct hmap_elem {
+ int pad; /* unused */
+ struct bpf_timer timer;
+};
+
+struct inner_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1024);
+ __type(key, int);
+ __type(value, struct hmap_elem);
+} inner_htab SEC(".maps");
+
+#define ARRAY_KEY 1
+#define HASH_KEY 1234
+
+struct outer_arr {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 2);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __array(values, struct inner_map);
+} outer_arr SEC(".maps") = {
+ .values = { [ARRAY_KEY] = &inner_htab },
+};
+
+__u64 err;
+__u64 ok;
+__u64 cnt;
+
+static int timer_cb1(void *map, int *key, struct hmap_elem *val);
+
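+/* timer_cb1 and timer_cb2 keep re-arming the same timer with each other as
+ * the callback, bumping 'cnt' on every invocation.
+ */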
+static int timer_cb2(void *map, int *key, struct hmap_elem *val)
+{
+ cnt++;
+ bpf_timer_set_callback(&val->timer, timer_cb1);
+ if (bpf_timer_start(&val->timer, 1000, 0))
+ err |= 1;
+ ok |= 1;
+ return 0;
+}
+
+/* callback for inner hash map */
+static int timer_cb1(void *map, int *key, struct hmap_elem *val)
+{
+ cnt++;
+ bpf_timer_set_callback(&val->timer, timer_cb2);
+ if (bpf_timer_start(&val->timer, 1000, 0))
+ err |= 2;
+ /* Do a lookup to make sure 'map' and 'key' pointers are correct */
+ bpf_map_lookup_elem(map, key);
+ ok |= 2;
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test1, int a)
+{
+ struct hmap_elem init = {};
+ struct bpf_map *inner_map;
+ struct hmap_elem *val;
+ int array_key = ARRAY_KEY;
+ int hash_key = HASH_KEY;
+
+ inner_map = bpf_map_lookup_elem(&outer_arr, &array_key);
+ if (!inner_map)
+ return 0;
+
+ bpf_map_update_elem(inner_map, &hash_key, &init, 0);
+ val = bpf_map_lookup_elem(inner_map, &hash_key);
+ if (!val)
+ return 0;
+
+ bpf_timer_init(&val->timer, inner_map, CLOCK_MONOTONIC);
+ if (bpf_timer_set_callback(&val->timer, timer_cb1))
+ err |= 4;
+ if (bpf_timer_start(&val->timer, 0, 0))
+ err |= 8;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/timer_mim_reject.c b/tools/testing/selftests/bpf/progs/timer_mim_reject.c
new file mode 100644
index 000000000000..dd3f1ed6d6e6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/timer_mim_reject.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <linux/bpf.h>
+#include <time.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+struct hmap_elem {
+ int pad; /* unused */
+ struct bpf_timer timer;
+};
+
+struct inner_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1024);
+ __type(key, int);
+ __type(value, struct hmap_elem);
+} inner_htab SEC(".maps");
+
+#define ARRAY_KEY 1
+#define ARRAY_KEY2 2
+#define HASH_KEY 1234
+
+struct outer_arr {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 2);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __array(values, struct inner_map);
+} outer_arr SEC(".maps") = {
+ .values = { [ARRAY_KEY] = &inner_htab },
+};
+
+__u64 err;
+__u64 ok;
+__u64 cnt;
+
+/* callback for inner hash map */
+static int timer_cb(void *map, int *key, struct hmap_elem *val)
+{
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test1, int a)
+{
+ struct hmap_elem init = {};
+ struct bpf_map *inner_map, *inner_map2;
+ struct hmap_elem *val;
+ int array_key = ARRAY_KEY;
+ int array_key2 = ARRAY_KEY2;
+ int hash_key = HASH_KEY;
+
+ inner_map = bpf_map_lookup_elem(&outer_arr, &array_key);
+ if (!inner_map)
+ return 0;
+
+ inner_map2 = bpf_map_lookup_elem(&outer_arr, &array_key2);
+ if (!inner_map2)
+ return 0;
+ bpf_map_update_elem(inner_map, &hash_key, &init, 0);
+ val = bpf_map_lookup_elem(inner_map, &hash_key);
+ if (!val)
+ return 0;
+
+ bpf_timer_init(&val->timer, inner_map2, CLOCK_MONOTONIC);
+ if (bpf_timer_set_callback(&val->timer, timer_cb))
+ err |= 4;
+ if (bpf_timer_start(&val->timer, 0, 0))
+ err |= 8;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/token_lsm.c b/tools/testing/selftests/bpf/progs/token_lsm.c
new file mode 100644
index 000000000000..a6002d073b1b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/token_lsm.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int my_pid;
+int reject_capable;
+int reject_cmd;
+
+SEC("lsm/bpf_token_capable")
+int BPF_PROG(token_capable, struct bpf_token *token, int cap)
+{
+ if (my_pid == 0 || my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+ if (reject_capable)
+ return -1;
+ return 0;
+}
+
+SEC("lsm/bpf_token_cmd")
+int BPF_PROG(token_cmd, struct bpf_token *token, enum bpf_cmd cmd)
+{
+ if (my_pid == 0 || my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+ if (reject_cmd)
+ return -1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/trace_dummy_st_ops.c b/tools/testing/selftests/bpf/progs/trace_dummy_st_ops.c
new file mode 100644
index 000000000000..00a4be9d3074
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/trace_dummy_st_ops.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int val = 0;
+
+SEC("fentry/test_1")
+int BPF_PROG(fentry_test_1, __u64 *st_ops_ctx)
+{
+ __u64 state;
+
+ /* Read the traced st_ops arg1 which is a pointer */
+ bpf_probe_read_kernel(&state, sizeof(__u64), (void *)st_ops_ctx);
+ /* Read state->val */
+ bpf_probe_read_kernel(&val, sizeof(__u32), (void *)state);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/trace_printk.c b/tools/testing/selftests/bpf/progs/trace_printk.c
index 8ca7f399b670..6695478c2b25 100644
--- a/tools/testing/selftests/bpf/progs/trace_printk.c
+++ b/tools/testing/selftests/bpf/progs/trace_printk.c
@@ -4,17 +4,18 @@
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
int trace_printk_ret = 0;
int trace_printk_ran = 0;
-SEC("tp/raw_syscalls/sys_enter")
+const char fmt[] = "Testing,testing %d\n";
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int sys_enter(void *ctx)
{
- static const char fmt[] = "testing,testing %d\n";
-
trace_printk_ret = bpf_trace_printk(fmt, sizeof(fmt),
++trace_printk_ran);
return 0;
diff --git a/tools/testing/selftests/bpf/progs/trace_vprintk.c b/tools/testing/selftests/bpf/progs/trace_vprintk.c
new file mode 100644
index 000000000000..969306cd4f33
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/trace_vprintk.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+int null_data_vprintk_ret = 0;
+int trace_vprintk_ret = 0;
+int trace_vprintk_ran = 0;
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int sys_enter(void *ctx)
+{
+ static const char one[] = "1";
+ static const char three[] = "3";
+ static const char five[] = "5";
+ static const char seven[] = "7";
+ static const char nine[] = "9";
+ static const char f[] = "%pS\n";
+
+ /* runner doesn't search for \t, just ensure it compiles */
+ bpf_printk("\t");
+
+ trace_vprintk_ret = __bpf_vprintk("%s,%d,%s,%d,%s,%d,%s,%d,%s,%d %d\n",
+ one, 2, three, 4, five, 6, seven, 8, nine, 10, ++trace_vprintk_ran);
+
+ /* non-NULL fmt w/ NULL data should result in error */
+ null_data_vprintk_ret = bpf_trace_vprintk(f, sizeof(f), NULL, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/tracing_failure.c b/tools/testing/selftests/bpf/progs/tracing_failure.c
new file mode 100644
index 000000000000..65e485c4468c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tracing_failure.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("?fentry/bpf_spin_lock")
+int BPF_PROG(test_spin_lock, struct bpf_spin_lock *lock)
+{
+ return 0;
+}
+
+SEC("?fentry/bpf_spin_unlock")
+int BPF_PROG(test_spin_unlock, struct bpf_spin_lock *lock)
+{
+ return 0;
+}
+
+SEC("?fentry/__rcu_read_lock")
+int BPF_PROG(tracing_deny)
+{
+ return 0;
+}
+
+SEC("?fexit/do_exit")
+int BPF_PROG(fexit_noreturns)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/tracing_struct.c b/tools/testing/selftests/bpf/progs/tracing_struct.c
new file mode 100644
index 000000000000..c435a3a8328a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tracing_struct.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct bpf_testmod_struct_arg_1 {
+ int a;
+};
+struct bpf_testmod_struct_arg_2 {
+ long a;
+ long b;
+};
+
+struct bpf_testmod_struct_arg_3 {
+ int a;
+ int b[];
+};
+
+long t1_a_a, t1_a_b, t1_b, t1_c, t1_ret, t1_nregs;
+__u64 t1_reg0, t1_reg1, t1_reg2, t1_reg3;
+long t2_a, t2_b_a, t2_b_b, t2_c, t2_ret;
+long t3_a, t3_b, t3_c_a, t3_c_b, t3_ret;
+long t4_a_a, t4_b, t4_c, t4_d, t4_e_a, t4_e_b, t4_ret;
+long t5_ret;
+int t6;
+
+SEC("fentry/bpf_testmod_test_struct_arg_1")
+int BPF_PROG2(test_struct_arg_1, struct bpf_testmod_struct_arg_2, a, int, b, int, c)
+{
+ t1_a_a = a.a;
+ t1_a_b = a.b;
+ t1_b = b;
+ t1_c = c;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_test_struct_arg_1")
+int BPF_PROG2(test_struct_arg_2, struct bpf_testmod_struct_arg_2, a, int, b, int, c, int, ret)
+{
+ t1_nregs = bpf_get_func_arg_cnt(ctx);
+ /* a.a */
+ bpf_get_func_arg(ctx, 0, &t1_reg0);
+ /* a.b */
+ bpf_get_func_arg(ctx, 1, &t1_reg1);
+ /* b */
+ bpf_get_func_arg(ctx, 2, &t1_reg2);
+ t1_reg2 = (int)t1_reg2;
+ /* c */
+ bpf_get_func_arg(ctx, 3, &t1_reg3);
+ t1_reg3 = (int)t1_reg3;
+
+ t1_ret = ret;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_struct_arg_2")
+int BPF_PROG2(test_struct_arg_3, int, a, struct bpf_testmod_struct_arg_2, b, int, c)
+{
+ t2_a = a;
+ t2_b_a = b.a;
+ t2_b_b = b.b;
+ t2_c = c;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_test_struct_arg_2")
+int BPF_PROG2(test_struct_arg_4, int, a, struct bpf_testmod_struct_arg_2, b, int, c, int, ret)
+{
+ t2_ret = ret;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_struct_arg_3")
+int BPF_PROG2(test_struct_arg_5, int, a, int, b, struct bpf_testmod_struct_arg_2, c)
+{
+ t3_a = a;
+ t3_b = b;
+ t3_c_a = c.a;
+ t3_c_b = c.b;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_test_struct_arg_3")
+int BPF_PROG2(test_struct_arg_6, int, a, int, b, struct bpf_testmod_struct_arg_2, c, int, ret)
+{
+ t3_ret = ret;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_struct_arg_4")
+int BPF_PROG2(test_struct_arg_7, struct bpf_testmod_struct_arg_1, a, int, b,
+ int, c, int, d, struct bpf_testmod_struct_arg_2, e)
+{
+ t4_a_a = a.a;
+ t4_b = b;
+ t4_c = c;
+ t4_d = d;
+ t4_e_a = e.a;
+ t4_e_b = e.b;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_test_struct_arg_4")
+int BPF_PROG2(test_struct_arg_8, struct bpf_testmod_struct_arg_1, a, int, b,
+ int, c, int, d, struct bpf_testmod_struct_arg_2, e, int, ret)
+{
+ t4_ret = ret;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_struct_arg_5")
+int BPF_PROG2(test_struct_arg_9)
+{
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_test_struct_arg_5")
+int BPF_PROG2(test_struct_arg_10, int, ret)
+{
+ t5_ret = ret;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_struct_arg_6")
+int BPF_PROG2(test_struct_arg_11, struct bpf_testmod_struct_arg_3 *, a)
+{
+ t6 = a->b[0];
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c b/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c
new file mode 100644
index 000000000000..4742012ace06
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct bpf_testmod_struct_arg_4 {
+ u64 a;
+ int b;
+};
+
+struct bpf_testmod_struct_arg_5 {
+ char a;
+ short b;
+ int c;
+ long d;
+};
+
+long t7_a, t7_b, t7_c, t7_d, t7_e, t7_f_a, t7_f_b, t7_ret;
+long t8_a, t8_b, t8_c, t8_d, t8_e, t8_f_a, t8_f_b, t8_g, t8_ret;
+long t9_a, t9_b, t9_c, t9_d, t9_e, t9_f, t9_g, t9_h_a, t9_h_b, t9_h_c, t9_h_d, t9_i, t9_ret;
+
+SEC("fentry/bpf_testmod_test_struct_arg_7")
+int BPF_PROG2(test_struct_many_args_1, __u64, a, void *, b, short, c, int, d,
+ void *, e, struct bpf_testmod_struct_arg_4, f)
+{
+ t7_a = a;
+ t7_b = (long)b;
+ t7_c = c;
+ t7_d = d;
+ t7_e = (long)e;
+ t7_f_a = f.a;
+ t7_f_b = f.b;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_test_struct_arg_7")
+int BPF_PROG2(test_struct_many_args_2, __u64, a, void *, b, short, c, int, d,
+ void *, e, struct bpf_testmod_struct_arg_4, f, int, ret)
+{
+ t7_ret = ret;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_struct_arg_8")
+int BPF_PROG2(test_struct_many_args_3, __u64, a, void *, b, short, c, int, d,
+ void *, e, struct bpf_testmod_struct_arg_4, f, int, g)
+{
+ t8_a = a;
+ t8_b = (long)b;
+ t8_c = c;
+ t8_d = d;
+ t8_e = (long)e;
+ t8_f_a = f.a;
+ t8_f_b = f.b;
+ t8_g = g;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_test_struct_arg_8")
+int BPF_PROG2(test_struct_many_args_4, __u64, a, void *, b, short, c, int, d,
+ void *, e, struct bpf_testmod_struct_arg_4, f, int, g,
+ int, ret)
+{
+ t8_ret = ret;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_struct_arg_9")
+int BPF_PROG2(test_struct_many_args_5, __u64, a, void *, b, short, c, int, d, void *, e,
+ char, f, short, g, struct bpf_testmod_struct_arg_5, h, long, i)
+{
+ t9_a = a;
+ t9_b = (long)b;
+ t9_c = c;
+ t9_d = d;
+ t9_e = (long)e;
+ t9_f = f;
+ t9_g = g;
+ t9_h_a = h.a;
+ t9_h_b = h.b;
+ t9_h_c = h.c;
+ t9_h_d = h.d;
+ t9_i = i;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_test_struct_arg_9")
+int BPF_PROG2(test_struct_many_args_6, __u64, a, void *, b, short, c, int, d, void *, e,
+ char, f, short, g, struct bpf_testmod_struct_arg_5, h, long, i, int, ret)
+{
+ t9_ret = ret;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c
index 9a4d09590b3d..044a6d78923e 100644
--- a/tools/testing/selftests/bpf/progs/trigger_bench.c
+++ b/tools/testing/selftests/bpf/progs/trigger_bench.c
@@ -1,54 +1,140 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
-
#include <linux/bpf.h>
#include <asm/unistd.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
-long hits = 0;
+#define CPU_MASK 255
+#define MAX_CPUS (CPU_MASK + 1) /* should match MAX_BUCKETS in benchs/bench_trigger.c */
-SEC("tp/syscalls/sys_enter_getpgid")
-int bench_trigger_tp(void *ctx)
+/* matches struct counter in bench.h */
+struct counter {
+ long value;
+} __attribute__((aligned(128)));
+
+struct counter hits[MAX_CPUS];
+
+static __always_inline void inc_counter(void)
{
- __sync_add_and_fetch(&hits, 1);
+ int cpu = bpf_get_smp_processor_id();
+
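+ /* mask the CPU id so the per-CPU slot index always stays within hits[] */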
+ __sync_add_and_fetch(&hits[cpu & CPU_MASK].value, 1);
+}
+
+SEC("?uprobe")
+int bench_trigger_uprobe(void *ctx)
+{
+ inc_counter();
return 0;
}
-SEC("raw_tp/sys_enter")
-int BPF_PROG(bench_trigger_raw_tp, struct pt_regs *regs, long id)
+SEC("?uprobe.multi")
+int bench_trigger_uprobe_multi(void *ctx)
{
- if (id == __NR_getpgid)
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
return 0;
}
-SEC("kprobe/__x64_sys_getpgid")
+const volatile int batch_iters = 0;
+
+SEC("?raw_tp")
+int trigger_count(void *ctx)
+{
+ int i;
+
+ for (i = 0; i < batch_iters; i++)
+ inc_counter();
+
+ return 0;
+}
+
+SEC("?raw_tp")
+int trigger_driver(void *ctx)
+{
+ int i;
+
+ for (i = 0; i < batch_iters; i++)
+ (void)bpf_get_numa_node_id(); /* attach point for benchmarking */
+
+ return 0;
+}
+
+extern int bpf_modify_return_test_tp(int nonce) __ksym __weak;
+
+SEC("?raw_tp")
+int trigger_driver_kfunc(void *ctx)
+{
+ int i;
+
+ for (i = 0; i < batch_iters; i++)
+ (void)bpf_modify_return_test_tp(0); /* attach point for benchmarking */
+
+ return 0;
+}
+
+SEC("?kprobe/bpf_get_numa_node_id")
int bench_trigger_kprobe(void *ctx)
{
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
+ return 0;
+}
+
+SEC("?kretprobe/bpf_get_numa_node_id")
+int bench_trigger_kretprobe(void *ctx)
+{
+ inc_counter();
+ return 0;
+}
+
+SEC("?kprobe.multi/bpf_get_numa_node_id")
+int bench_trigger_kprobe_multi(void *ctx)
+{
+ inc_counter();
+ return 0;
+}
+
+SEC("?kretprobe.multi/bpf_get_numa_node_id")
+int bench_trigger_kretprobe_multi(void *ctx)
+{
+ inc_counter();
return 0;
}
-SEC("fentry/__x64_sys_getpgid")
+SEC("?fentry/bpf_get_numa_node_id")
int bench_trigger_fentry(void *ctx)
{
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
return 0;
}
-SEC("fentry.s/__x64_sys_getpgid")
-int bench_trigger_fentry_sleep(void *ctx)
+SEC("?fexit/bpf_get_numa_node_id")
+int bench_trigger_fexit(void *ctx)
{
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
return 0;
}
-SEC("fmod_ret/__x64_sys_getpgid")
+SEC("?fmod_ret/bpf_modify_return_test_tp")
int bench_trigger_fmodret(void *ctx)
{
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
return -22;
}
+
+SEC("?tp/bpf_test_run/bpf_trigger_tp")
+int bench_trigger_tp(void *ctx)
+{
+ inc_counter();
+ return 0;
+}
+
+SEC("?raw_tp/bpf_trigger_tp")
+int bench_trigger_rawtp(void *ctx)
+{
+ inc_counter();
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/twfw.c b/tools/testing/selftests/bpf/progs/twfw.c
new file mode 100644
index 000000000000..de1b18a62b46
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/twfw.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <linux/types.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/bpf.h>
+#include <stdint.h>
+
+#define TWFW_MAX_TIERS (64)
+/*
+ * load is successful
+ * #define TWFW_MAX_TIERS (64u)
+ */
+
+struct twfw_tier_value {
+ unsigned long mask[1];
+};
+
+struct rule {
+ uint8_t seqnum;
+};
+
+struct rules_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, struct rule);
+ __uint(max_entries, 1);
+};
+
+struct tiers_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, struct twfw_tier_value);
+ __uint(max_entries, 1);
+};
+
+struct rules_map rules SEC(".maps");
+struct tiers_map tiers SEC(".maps");
+
+SEC("cgroup_skb/ingress")
+int twfw_verifier(struct __sk_buff* skb)
+{
+ const uint32_t key = 0;
+ const struct twfw_tier_value* tier = bpf_map_lookup_elem(&tiers, &key);
+ if (!tier)
+ return 1;
+
+ struct rule* rule = bpf_map_lookup_elem(&rules, &key);
+ if (!rule)
+ return 1;
+
+ if (rule && rule->seqnum < TWFW_MAX_TIERS) {
+ /* rule->seqnum / 64 should always be 0 */
+ unsigned long mask = tier->mask[rule->seqnum / 64];
+ if (mask)
+ return 0;
+ }
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/type_cast.c b/tools/testing/selftests/bpf/progs/type_cast.c
new file mode 100644
index 000000000000..9d808b8f4ab0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/type_cast.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} enter_id SEC(".maps");
+
+#define IFNAMSIZ 16
+
+int ifindex, ingress_ifindex;
+char name[IFNAMSIZ];
+unsigned int inum;
+unsigned int meta_len, frag0_len, kskb_len, kskb2_len;
+
+SEC("?xdp")
+int md_xdp(struct xdp_md *ctx)
+{
+ struct xdp_buff *kctx = bpf_cast_to_kern_ctx(ctx);
+ struct net_device *dev;
+
+ dev = kctx->rxq->dev;
+ ifindex = dev->ifindex;
+ inum = dev->nd_net.net->ns.inum;
+ __builtin_memcpy(name, dev->name, IFNAMSIZ);
+ ingress_ifindex = ctx->ingress_ifindex;
+ return XDP_PASS;
+}
+
+SEC("?tc")
+int md_skb(struct __sk_buff *skb)
+{
+ struct sk_buff *kskb = bpf_cast_to_kern_ctx(skb);
+ struct skb_shared_info *shared_info;
+ struct sk_buff *kskb2;
+
+ kskb_len = kskb->len;
+
+ /* Simulate the following kernel macro:
+ * #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
+ */
+ shared_info = bpf_core_cast(kskb->head + kskb->end, struct skb_shared_info);
+ meta_len = shared_info->meta_len;
+ frag0_len = shared_info->frag_list->len;
+
+ /* kskb2 should be equal to kskb */
+ kskb2 = bpf_core_cast(kskb, typeof(*kskb2));
+ kskb2_len = kskb2->len;
+ return 0;
+}
+
+SEC("?tp_btf/sys_enter")
+int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id)
+{
+ struct task_struct *task, *task_dup;
+
+ task = bpf_get_current_task_btf();
+ task_dup = bpf_core_cast(task, struct task_struct);
+ (void)bpf_task_storage_get(&enter_id, task_dup, 0, 0);
+ return 0;
+}
+
+SEC("?tracepoint/syscalls/sys_enter_nanosleep")
+int kctx_u64(void *ctx)
+{
+ u64 *kctx = bpf_core_cast(ctx, u64);
+
+ (void)kctx;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/udp_limit.c b/tools/testing/selftests/bpf/progs/udp_limit.c
index 165e3c2dd9a3..4767451b59ac 100644
--- a/tools/testing/selftests/bpf/progs/udp_limit.c
+++ b/tools/testing/selftests/bpf/progs/udp_limit.c
@@ -17,7 +17,6 @@ SEC("cgroup/sock_create")
int sock(struct bpf_sock *ctx)
{
int *sk_storage;
- __u32 key;
if (ctx->type != SOCK_DGRAM)
return 1;
@@ -46,7 +45,6 @@ SEC("cgroup/sock_release")
int sock_release(struct bpf_sock *ctx)
{
int *sk_storage;
- __u32 key;
if (ctx->type != SOCK_DGRAM)
return 1;
diff --git a/tools/testing/selftests/bpf/progs/uninit_stack.c b/tools/testing/selftests/bpf/progs/uninit_stack.c
new file mode 100644
index 000000000000..046a204c8fc6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uninit_stack.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+/* Read an uninitialized value from stack at a fixed offset */
+SEC("socket")
+__naked int read_uninit_stack_fixed_off(void *ctx)
+{
+ asm volatile (" \
+ r0 = 0; \
+ /* force stack depth to be 128 */ \
+ *(u64*)(r10 - 128) = r1; \
+ r1 = *(u8 *)(r10 - 8 ); \
+ r0 += r1; \
+ r1 = *(u8 *)(r10 - 11); \
+ r1 = *(u8 *)(r10 - 13); \
+ r1 = *(u8 *)(r10 - 15); \
+ r1 = *(u16*)(r10 - 16); \
+ r1 = *(u32*)(r10 - 32); \
+ r1 = *(u64*)(r10 - 64); \
+ /* read from a spill of a wrong size, it is a separate \
+ * branch in check_stack_read_fixed_off() \
+ */ \
+ *(u32*)(r10 - 72) = r1; \
+ r1 = *(u64*)(r10 - 72); \
+ r0 = 0; \
+ exit; \
+"
+ ::: __clobber_all);
+}
+
+/* Read an uninitialized value from stack at a variable offset */
+SEC("socket")
+__naked int read_uninit_stack_var_off(void *ctx)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ /* force stack depth to be 64 */ \
+ *(u64*)(r10 - 64) = r0; \
+ r0 = -r0; \
+ /* give r0 a range [-31, -1] */ \
+ if r0 s<= -32 goto exit_%=; \
+ if r0 s>= 0 goto exit_%=; \
+ /* access stack using r0 */ \
+ r1 = r10; \
+ r1 += r0; \
+ r2 = *(u8*)(r1 + 0); \
+exit_%=: r0 = 0; \
+ exit; \
+"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+static __noinline void dummy(void) {}
+
+/* Pass a pointer to uninitialized stack memory to a helper.
+ * Passed memory block should be marked as STACK_MISC after helper call.
+ */
+SEC("socket")
+__log_level(7) __msg("fp-104=mmmmmmmm")
+__naked int helper_uninit_to_misc(void *ctx)
+{
+ asm volatile (" \
+ /* force stack depth to be 128 */ \
+ *(u64*)(r10 - 128) = r1; \
+ r1 = r10; \
+ r1 += -128; \
+ r2 = 32; \
+ r3 = 0; \
+ call %[bpf_probe_read_user]; \
+ /* Call to dummy() forces print_verifier_state(..., true), \
+ * thus showing the stack state, matched by __msg(). \
+ */ \
+ call %[dummy]; \
+ r0 = 0; \
+ exit; \
+"
+ :
+ : __imm(bpf_probe_read_user),
+ __imm(dummy)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/unsupported_ops.c b/tools/testing/selftests/bpf/progs/unsupported_ops.c
new file mode 100644
index 000000000000..8aa2e0dd624e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/unsupported_ops.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops/unsupported_ops")
+__failure
+__msg("attach to unsupported member unsupported_ops of struct bpf_testmod_ops")
+int BPF_PROG(unsupported_ops)
+{
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod = {
+ .unsupported_ops = (void *)unsupported_ops,
+};
diff --git a/tools/testing/selftests/bpf/progs/update_map_in_htab.c b/tools/testing/selftests/bpf/progs/update_map_in_htab.c
new file mode 100644
index 000000000000..c2066247cd9c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/update_map_in_htab.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024. Huawei Technologies Co., Ltd */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct inner_map_type {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, 4);
+ __uint(value_size, 4);
+ __uint(max_entries, 1);
+} inner_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 2);
+ __array(values, struct inner_map_type);
+} outer_htab_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 2);
+ __array(values, struct inner_map_type);
+} outer_alloc_htab_map SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi.c b/tools/testing/selftests/bpf/progs/uprobe_multi.c
new file mode 100644
index 000000000000..44190efcdba2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/usdt.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 uprobe_multi_func_1_addr = 0;
+__u64 uprobe_multi_func_2_addr = 0;
+__u64 uprobe_multi_func_3_addr = 0;
+
+__u64 uprobe_multi_func_1_result = 0;
+__u64 uprobe_multi_func_2_result = 0;
+__u64 uprobe_multi_func_3_result = 0;
+
+__u64 uretprobe_multi_func_1_result = 0;
+__u64 uretprobe_multi_func_2_result = 0;
+__u64 uretprobe_multi_func_3_result = 0;
+
+__u64 uprobe_multi_sleep_result = 0;
+
+int pid = 0;
+int child_pid = 0;
+int child_tid = 0;
+int child_pid_usdt = 0;
+int child_tid_usdt = 0;
+
+int expect_pid = 0;
+bool bad_pid_seen = false;
+bool bad_pid_seen_usdt = false;
+
+bool test_cookie = false;
+void *user_ptr = 0;
+
+static __always_inline bool verify_sleepable_user_copy(void)
+{
+ char data[9];
+
+ bpf_copy_from_user(data, sizeof(data), user_ptr);
+ return bpf_strncmp(data, sizeof(data), "test_data") == 0;
+}
+
+static void uprobe_multi_check(void *ctx, bool is_return, bool is_sleep)
+{
+ __u64 cur_pid_tgid = bpf_get_current_pid_tgid();
+ __u32 cur_pid;
+
+ cur_pid = cur_pid_tgid >> 32;
+ if (pid && cur_pid != pid)
+ return;
+
+ if (expect_pid && cur_pid != expect_pid)
+ bad_pid_seen = true;
+
+ child_pid = cur_pid_tgid >> 32;
+ child_tid = (__u32)cur_pid_tgid;
+
+ __u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0;
+ __u64 addr = bpf_get_func_ip(ctx);
+
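+ /* SET() bumps the given result counter only when the probed address
+ * matches and, if cookie testing is enabled, the attach cookie matches too.
+ */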
+#define SET(__var, __addr, __cookie) ({ \
+ if (addr == __addr && \
+ (!test_cookie || (cookie == __cookie))) \
+ __var += 1; \
+})
+
+ if (is_return) {
+ SET(uretprobe_multi_func_1_result, uprobe_multi_func_1_addr, 2);
+ SET(uretprobe_multi_func_2_result, uprobe_multi_func_2_addr, 3);
+ SET(uretprobe_multi_func_3_result, uprobe_multi_func_3_addr, 1);
+ } else {
+ SET(uprobe_multi_func_1_result, uprobe_multi_func_1_addr, 3);
+ SET(uprobe_multi_func_2_result, uprobe_multi_func_2_addr, 1);
+ SET(uprobe_multi_func_3_result, uprobe_multi_func_3_addr, 2);
+ }
+
+#undef SET
+
+ if (is_sleep && verify_sleepable_user_copy())
+ uprobe_multi_sleep_result += 1;
+}
+
+SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*")
+int uprobe(struct pt_regs *ctx)
+{
+ uprobe_multi_check(ctx, false, false);
+ return 0;
+}
+
+SEC("uretprobe.multi//proc/self/exe:uprobe_multi_func_*")
+int uretprobe(struct pt_regs *ctx)
+{
+ uprobe_multi_check(ctx, true, false);
+ return 0;
+}
+
+SEC("uprobe.multi.s//proc/self/exe:uprobe_multi_func_*")
+int uprobe_sleep(struct pt_regs *ctx)
+{
+ uprobe_multi_check(ctx, false, true);
+ return 0;
+}
+
+SEC("uretprobe.multi.s//proc/self/exe:uprobe_multi_func_*")
+int uretprobe_sleep(struct pt_regs *ctx)
+{
+ uprobe_multi_check(ctx, true, true);
+ return 0;
+}
+
+SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*")
+int uprobe_extra(struct pt_regs *ctx)
+{
+ /* we need this one just to mix PID-filtered and global uprobes */
+ return 0;
+}
+
+SEC("usdt")
+int usdt_pid(struct pt_regs *ctx)
+{
+ __u64 cur_pid_tgid = bpf_get_current_pid_tgid();
+ __u32 cur_pid;
+
+ cur_pid = cur_pid_tgid >> 32;
+ if (pid && cur_pid != pid)
+ return 0;
+
+ if (expect_pid && cur_pid != expect_pid)
+ bad_pid_seen_usdt = true;
+
+ child_pid_usdt = cur_pid_tgid >> 32;
+ child_tid_usdt = (__u32)cur_pid_tgid;
+
+ return 0;
+}
+
+SEC("usdt")
+int usdt_extra(struct pt_regs *ctx)
+{
+ /* we need this one just to mix PID-filtered and global USDT probes */
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_bench.c b/tools/testing/selftests/bpf/progs/uprobe_multi_bench.c
new file mode 100644
index 000000000000..5367f6105e30
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_bench.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int count;
+
+SEC("uprobe.multi/./uprobe_multi:uprobe_multi_func_*")
+int uprobe_bench(struct pt_regs *ctx)
+{
+ count++;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c b/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c
new file mode 100644
index 000000000000..93752bb5690b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__u64 uprobe_result[4];
+
+SEC("uprobe.multi")
+int uprobe_0(struct pt_regs *ctx)
+{
+ uprobe_result[0]++;
+ return 0;
+}
+
+SEC("uprobe.multi")
+int uprobe_1(struct pt_regs *ctx)
+{
+ uprobe_result[1]++;
+ return 0;
+}
+
+SEC("uprobe.session")
+int uprobe_2(struct pt_regs *ctx)
+{
+ uprobe_result[2]++;
+ return 0;
+}
+
+SEC("uprobe.session")
+int uprobe_3(struct pt_regs *ctx)
+{
+ uprobe_result[3]++;
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c b/tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c
new file mode 100644
index 000000000000..67fcbad36661
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 pids[3];
+__u32 test[3][2];
+
+static void update_pid(int idx)
+{
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
+
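+ /* test[idx][0] counts hits from the expected pid, test[idx][1] counts
+ * hits from any other pid
+ */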
+ if (pid == pids[idx])
+ test[idx][0]++;
+ else
+ test[idx][1]++;
+}
+
+SEC("uprobe.multi")
+int uprobe_multi_0(struct pt_regs *ctx)
+{
+ update_pid(0);
+ return 0;
+}
+
+SEC("uprobe.multi")
+int uprobe_multi_1(struct pt_regs *ctx)
+{
+ update_pid(1);
+ return 0;
+}
+
+SEC("uprobe.multi")
+int uprobe_multi_2(struct pt_regs *ctx)
+{
+ update_pid(2);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session.c
new file mode 100644
index 000000000000..30bff90b68dc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__u64 uprobe_multi_func_1_addr = 0;
+__u64 uprobe_multi_func_2_addr = 0;
+__u64 uprobe_multi_func_3_addr = 0;
+
+__u64 uprobe_session_result[3] = {};
+__u64 uprobe_multi_sleep_result = 0;
+
+void *user_ptr = 0;
+int pid = 0;
+
+static int uprobe_multi_check(void *ctx, bool is_return)
+{
+ const __u64 funcs[] = {
+ uprobe_multi_func_1_addr,
+ uprobe_multi_func_2_addr,
+ uprobe_multi_func_3_addr,
+ };
+ unsigned int i;
+ __u64 addr;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 1;
+
+ addr = bpf_get_func_ip(ctx);
+
+ for (i = 0; i < ARRAY_SIZE(funcs); i++) {
+ if (funcs[i] == addr) {
+ uprobe_session_result[i]++;
+ break;
+ }
+ }
+
+ /* only uprobe_multi_func_2 executes return probe */
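+ /* returning 1 from a session entry run tells the kernel to skip the
+ * return probe for that function
+ */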
+ if ((addr == uprobe_multi_func_1_addr) ||
+ (addr == uprobe_multi_func_3_addr))
+ return 1;
+
+ return 0;
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_*")
+int uprobe(struct pt_regs *ctx)
+{
+ return uprobe_multi_check(ctx, bpf_session_is_return());
+}
+
+static __always_inline bool verify_sleepable_user_copy(void)
+{
+ char data[9];
+
+ bpf_copy_from_user(data, sizeof(data), user_ptr);
+ return bpf_strncmp(data, sizeof(data), "test_data") == 0;
+}
+
+SEC("uprobe.session.s//proc/self/exe:uprobe_multi_func_*")
+int uprobe_sleepable(struct pt_regs *ctx)
+{
+ if (verify_sleepable_user_copy())
+ uprobe_multi_sleep_result++;
+ return uprobe_multi_check(ctx, bpf_session_is_return());
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session_cookie.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session_cookie.c
new file mode 100644
index 000000000000..5befdf944dc6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session_cookie.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+
+__u64 test_uprobe_1_result = 0;
+__u64 test_uprobe_2_result = 0;
+__u64 test_uprobe_3_result = 0;
+
+static int check_cookie(__u64 val, __u64 *result)
+{
+ __u64 *cookie;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 1;
+
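+ /* the entry run stores 'val' in the session cookie; the return run
+ * checks that it round-tripped
+ */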
+ cookie = bpf_session_cookie();
+
+ if (bpf_session_is_return())
+ *result = *cookie == val ? val : 0;
+ else
+ *cookie = val;
+ return 0;
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1")
+int uprobe_1(struct pt_regs *ctx)
+{
+ return check_cookie(1, &test_uprobe_1_result);
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_2")
+int uprobe_2(struct pt_regs *ctx)
+{
+ return check_cookie(2, &test_uprobe_2_result);
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_3")
+int uprobe_3(struct pt_regs *ctx)
+{
+ return check_cookie(3, &test_uprobe_3_result);
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session_recursive.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session_recursive.c
new file mode 100644
index 000000000000..8fbcd69fae22
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session_recursive.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+
+int idx_entry = 0;
+int idx_return = 0;
+
+__u64 test_uprobe_cookie_entry[6];
+__u64 test_uprobe_cookie_return[3];
+
+static int check_cookie(void)
+{
+ __u64 *cookie = bpf_session_cookie();
+
+ if (bpf_session_is_return()) {
+ if (idx_return >= ARRAY_SIZE(test_uprobe_cookie_return))
+ return 1;
+ test_uprobe_cookie_return[idx_return++] = *cookie;
+ return 0;
+ }
+
+ if (idx_entry >= ARRAY_SIZE(test_uprobe_cookie_entry))
+ return 1;
+ *cookie = test_uprobe_cookie_entry[idx_entry];
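+ /* even-indexed entries return 0 (run the return probe), odd-indexed
+ * entries return 1 (skip it)
+ */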
+ return idx_entry++ % 2;
+}
+
+
+SEC("uprobe.session//proc/self/exe:uprobe_session_recursive")
+int uprobe_recursive(struct pt_regs *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 1;
+
+ return check_cookie();
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session_single.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session_single.c
new file mode 100644
index 000000000000..7c960376ae97
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session_single.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__u64 uprobe_session_result[3] = {};
+int pid = 0;
+
+static int uprobe_multi_check(void *ctx, int idx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 1;
+
+ uprobe_session_result[idx]++;
+
+ /* only consumer 1 executes return probe */
+ if (idx == 0 || idx == 2)
+ return 1;
+
+ return 0;
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1")
+int uprobe_0(struct pt_regs *ctx)
+{
+ return uprobe_multi_check(ctx, 0);
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1")
+int uprobe_1(struct pt_regs *ctx)
+{
+ return uprobe_multi_check(ctx, 1);
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1")
+int uprobe_2(struct pt_regs *ctx)
+{
+ return uprobe_multi_check(ctx, 2);
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c b/tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c
new file mode 100644
index 000000000000..9e1c33d0bd2f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/usdt.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+int count;
+
+SEC("usdt")
+int usdt0(struct pt_regs *ctx)
+{
+ count++;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c b/tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c
new file mode 100644
index 000000000000..fe49f2cb5360
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/usdt.bpf.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+
+SEC("uprobe.session")
+__success
+int uprobe_sesison_return_0(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("uprobe.session")
+__success
+int uprobe_sesison_return_1(struct pt_regs *ctx)
+{
+ return 1;
+}
+
+SEC("uprobe.session")
+__failure
+__msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]")
+int uprobe_sesison_return_2(struct pt_regs *ctx)
+{
+ return 2;
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_syscall.c b/tools/testing/selftests/bpf/progs/uprobe_syscall.c
new file mode 100644
index 000000000000..8a4fa6c7ef59
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_syscall.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <string.h>
+
+struct pt_regs regs;
+
+char _license[] SEC("license") = "GPL";
+
+SEC("uretprobe//proc/self/exe:uretprobe_regs_trigger")
+int uretprobe(struct pt_regs *ctx)
+{
+ __builtin_memcpy(&regs, ctx, sizeof(regs));
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c b/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c
new file mode 100644
index 000000000000..0d7f1a7db2e2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <string.h>
+
+struct pt_regs regs;
+
+char _license[] SEC("license") = "GPL";
+
+int executed = 0;
+
+SEC("uretprobe.multi")
+int test(struct pt_regs *regs)
+{
+ executed = 1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/uptr_failure.c b/tools/testing/selftests/bpf/progs/uptr_failure.c
new file mode 100644
index 000000000000..0cfa1fd61440
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uptr_failure.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+#include "uptr_test_common.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct value_type);
+} datamap SEC(".maps");
+
+SEC("?syscall")
+__failure __msg("store to uptr disallowed")
+int uptr_write(const void *ctx)
+{
+ struct task_struct *task;
+ struct value_type *v;
+
+ task = bpf_get_current_task_btf();
+ v = bpf_task_storage_get(&datamap, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!v)
+ return 0;
+
+ v->udata = NULL;
+ return 0;
+}
+
+SEC("?syscall")
+__failure __msg("store to uptr disallowed")
+int uptr_write_nested(const void *ctx)
+{
+ struct task_struct *task;
+ struct value_type *v;
+
+ task = bpf_get_current_task_btf();
+ v = bpf_task_storage_get(&datamap, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!v)
+ return 0;
+
+ v->nested.udata = NULL;
+ return 0;
+}
+
+SEC("?syscall")
+__failure __msg("R1 invalid mem access 'mem_or_null'")
+int uptr_no_null_check(const void *ctx)
+{
+ struct task_struct *task;
+ struct value_type *v;
+
+ task = bpf_get_current_task_btf();
+ v = bpf_task_storage_get(&datamap, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!v)
+ return 0;
+
+ v->udata->result = 0;
+
+ return 0;
+}
+
+SEC("?syscall")
+__failure __msg("doesn't point to kptr")
+int uptr_kptr_xchg(const void *ctx)
+{
+ struct task_struct *task;
+ struct value_type *v;
+
+ task = bpf_get_current_task_btf();
+ v = bpf_task_storage_get(&datamap, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!v)
+ return 0;
+
+ bpf_kptr_xchg(&v->udata, NULL);
+
+ return 0;
+}
+
+SEC("?syscall")
+__failure __msg("invalid mem access 'scalar'")
+int uptr_obj_new(const void *ctx)
+{
+ struct value_type *v;
+
+ v = bpf_obj_new(typeof(*v));
+ if (!v)
+ return 0;
+
+ if (v->udata)
+ v->udata->result = 0;
+
+ bpf_obj_drop(v);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/uptr_map_failure.c b/tools/testing/selftests/bpf/progs/uptr_map_failure.c
new file mode 100644
index 000000000000..417b763d76b4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uptr_map_failure.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "uptr_test_common.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct large_uptr);
+} large_uptr_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct empty_uptr);
+} empty_uptr_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct kstruct_uptr);
+} kstruct_uptr_map SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/uptr_update_failure.c b/tools/testing/selftests/bpf/progs/uptr_update_failure.c
new file mode 100644
index 000000000000..86c3bb954abc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uptr_update_failure.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "uptr_test_common.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct value_lock_type);
+} datamap SEC(".maps");
+
+/* load test only. not used */
+SEC("syscall")
+int not_used(void *ctx)
+{
+ struct value_lock_type *ptr;
+ struct task_struct *task;
+ struct user_data *udata;
+
+ task = bpf_get_current_task_btf();
+ ptr = bpf_task_storage_get(&datamap, task, 0, 0);
+ if (!ptr)
+ return 0;
+
+ bpf_spin_lock(&ptr->lock);
+
+ udata = ptr->udata;
+ if (!udata) {
+ bpf_spin_unlock(&ptr->lock);
+ return 0;
+ }
+ udata->result = MAGIC_VALUE + udata->a + udata->b;
+
+ bpf_spin_unlock(&ptr->lock);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/uretprobe_stack.c b/tools/testing/selftests/bpf/progs/uretprobe_stack.c
new file mode 100644
index 000000000000..9fdcf396b8f4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uretprobe_stack.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/usdt.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 entry_stack1[32], exit_stack1[32];
+__u64 entry_stack1_recur[32], exit_stack1_recur[32];
+__u64 entry_stack2[32];
+__u64 entry_stack3[32];
+__u64 entry_stack4[32], exit_stack4[32];
+__u64 usdt_stack[32];
+
+int entry1_len, exit1_len;
+int entry1_recur_len, exit1_recur_len;
+int entry2_len, exit2_len;
+int entry3_len, exit3_len;
+int entry4_len, exit4_len;
+int usdt_len;
+
+#define SZ sizeof(usdt_stack)
+
+SEC("uprobe//proc/self/exe:target_1")
+int BPF_UPROBE(uprobe_1)
+{
+ /* target_1 is recursive with a depth of 2, so we capture two separate
+ * stack traces, depending on which occurrence it is
+ */
+ static bool recur = false;
+
+ if (!recur)
+ entry1_len = bpf_get_stack(ctx, &entry_stack1, SZ, BPF_F_USER_STACK);
+ else
+ entry1_recur_len = bpf_get_stack(ctx, &entry_stack1_recur, SZ, BPF_F_USER_STACK);
+
+ recur = true;
+ return 0;
+}
+
+SEC("uretprobe//proc/self/exe:target_1")
+int BPF_URETPROBE(uretprobe_1)
+{
+ /* see above, target_1 is recursive */
+ static bool recur = false;
+
+ /* NOTE: the order of returns is reversed relative to the order of entries */
+ if (!recur)
+ exit1_recur_len = bpf_get_stack(ctx, &exit_stack1_recur, SZ, BPF_F_USER_STACK);
+ else
+ exit1_len = bpf_get_stack(ctx, &exit_stack1, SZ, BPF_F_USER_STACK);
+
+ recur = true;
+ return 0;
+}
+
+SEC("uprobe//proc/self/exe:target_2")
+int BPF_UPROBE(uprobe_2)
+{
+ entry2_len = bpf_get_stack(ctx, &entry_stack2, SZ, BPF_F_USER_STACK);
+ return 0;
+}
+
+/* no uretprobe for target_2 */
+
+SEC("uprobe//proc/self/exe:target_3")
+int BPF_UPROBE(uprobe_3)
+{
+ entry3_len = bpf_get_stack(ctx, &entry_stack3, SZ, BPF_F_USER_STACK);
+ return 0;
+}
+
+/* no uretprobe for target_3 */
+
+SEC("uprobe//proc/self/exe:target_4")
+int BPF_UPROBE(uprobe_4)
+{
+ entry4_len = bpf_get_stack(ctx, &entry_stack4, SZ, BPF_F_USER_STACK);
+ return 0;
+}
+
+SEC("uretprobe//proc/self/exe:target_4")
+int BPF_URETPROBE(uretprobe_4)
+{
+ exit4_len = bpf_get_stack(ctx, &exit_stack4, SZ, BPF_F_USER_STACK);
+ return 0;
+}
+
+SEC("usdt//proc/self/exe:uretprobe_stack:target")
+int BPF_USDT(usdt_probe)
+{
+ usdt_len = bpf_get_stack(ctx, &usdt_stack, SZ, BPF_F_USER_STACK);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
new file mode 100644
index 000000000000..54de0389f878
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct sample {
+ int pid;
+ int seq;
+ long value;
+ char comm[16];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
+ __uint(max_entries, 4096);
+} user_ringbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 2);
+} ringbuf SEC(".maps");
+
+static int map_value;
+
+static long
+bad_access1(struct bpf_dynptr *dynptr, void *context)
+{
+ const struct sample *sample;
+
+ sample = bpf_dynptr_data(dynptr - 1, 0, sizeof(*sample));
+ bpf_printk("Was able to pass bad pointer %lx\n", (__u64)dynptr - 1);
+
+ return 0;
+}
+
+/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
+ * not be able to read before the pointer.
+ */
+SEC("?raw_tp")
+__failure __msg("negative offset dynptr_ptr ptr")
+int user_ringbuf_callback_bad_access1(void *ctx)
+{
+ bpf_user_ringbuf_drain(&user_ringbuf, bad_access1, NULL, 0);
+
+ return 0;
+}
+
+static long
+bad_access2(struct bpf_dynptr *dynptr, void *context)
+{
+ const struct sample *sample;
+
+ sample = bpf_dynptr_data(dynptr + 1, 0, sizeof(*sample));
+ bpf_printk("Was able to pass bad pointer %lx\n", (__u64)dynptr + 1);
+
+ return 0;
+}
+
+/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
+ * not be able to read past the end of the pointer.
+ */
+SEC("?raw_tp")
+__failure __msg("dereference of modified dynptr_ptr ptr")
+int user_ringbuf_callback_bad_access2(void *ctx)
+{
+ bpf_user_ringbuf_drain(&user_ringbuf, bad_access2, NULL, 0);
+
+ return 0;
+}
+
+static long
+write_forbidden(struct bpf_dynptr *dynptr, void *context)
+{
+ *((long *)dynptr) = 0;
+
+ return 0;
+}
+
+/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
+ * not be able to write to that pointer.
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'dynptr_ptr'")
+int user_ringbuf_callback_write_forbidden(void *ctx)
+{
+ bpf_user_ringbuf_drain(&user_ringbuf, write_forbidden, NULL, 0);
+
+ return 0;
+}
+
+static long
+null_context_write(struct bpf_dynptr *dynptr, void *context)
+{
+ *((__u64 *)context) = 0;
+
+ return 0;
+}
+
+/* A callback invoked from bpf_user_ringbuf_drain() should not be able to
+ * write through a NULL context pointer.
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int user_ringbuf_callback_null_context_write(void *ctx)
+{
+ bpf_user_ringbuf_drain(&user_ringbuf, null_context_write, NULL, 0);
+
+ return 0;
+}
+
+static long
+null_context_read(struct bpf_dynptr *dynptr, void *context)
+{
+ __u64 id = *((__u64 *)context);
+
+ bpf_printk("Read id %lu\n", id);
+
+ return 0;
+}
+
+/* A callback invoked from bpf_user_ringbuf_drain() should not be able to
+ * read through a NULL context pointer.
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int user_ringbuf_callback_null_context_read(void *ctx)
+{
+ bpf_user_ringbuf_drain(&user_ringbuf, null_context_read, NULL, 0);
+
+ return 0;
+}
+
+static long
+try_discard_dynptr(struct bpf_dynptr *dynptr, void *context)
+{
+ bpf_ringbuf_discard_dynptr(dynptr, 0);
+
+ return 0;
+}
+
+/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
+ * not be able to discard that dynptr, since the callback does not own it.
+ */
+SEC("?raw_tp")
+__failure __msg("cannot release unowned const bpf_dynptr")
+int user_ringbuf_callback_discard_dynptr(void *ctx)
+{
+ bpf_user_ringbuf_drain(&user_ringbuf, try_discard_dynptr, NULL, 0);
+
+ return 0;
+}
+
+static long
+try_submit_dynptr(struct bpf_dynptr *dynptr, void *context)
+{
+ bpf_ringbuf_submit_dynptr(dynptr, 0);
+
+ return 0;
+}
+
+/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
+ * not be able to submit that dynptr, since the callback does not own it.
+ */
+SEC("?raw_tp")
+__failure __msg("cannot release unowned const bpf_dynptr")
+int user_ringbuf_callback_submit_dynptr(void *ctx)
+{
+ bpf_user_ringbuf_drain(&user_ringbuf, try_submit_dynptr, NULL, 0);
+
+ return 0;
+}
+
+static long
+invalid_drain_callback_return(struct bpf_dynptr *dynptr, void *context)
+{
+ return 2;
+}
+
+/* A bpf_user_ringbuf_drain() callback must return 0 or 1; any other return
+ * value is rejected at verification time.
+ */
+SEC("?raw_tp")
+__failure __msg("At callback return the register R0 has ")
+int user_ringbuf_callback_invalid_return(void *ctx)
+{
+ bpf_user_ringbuf_drain(&user_ringbuf, invalid_drain_callback_return, NULL, 0);
+
+ return 0;
+}
+
+static long
+try_reinit_dynptr_mem(struct bpf_dynptr *dynptr, void *context)
+{
+ bpf_dynptr_from_mem(&map_value, 4, 0, dynptr);
+ return 0;
+}
+
+static long
+try_reinit_dynptr_ringbuf(struct bpf_dynptr *dynptr, void *context)
+{
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, dynptr);
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("Dynptr has to be an uninitialized dynptr")
+int user_ringbuf_callback_reinit_dynptr_mem(void *ctx)
+{
+ bpf_user_ringbuf_drain(&user_ringbuf, try_reinit_dynptr_mem, NULL, 0);
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("Dynptr has to be an uninitialized dynptr")
+int user_ringbuf_callback_reinit_dynptr_ringbuf(void *ctx)
+{
+ bpf_user_ringbuf_drain(&user_ringbuf, try_reinit_dynptr_ringbuf, NULL, 0);
+ return 0;
+}
+
+__noinline long global_call_bpf_dynptr_data(struct bpf_dynptr *dynptr)
+{
+ bpf_dynptr_data(dynptr, 0xA, 0xA);
+ return 0;
+}
+
+static long callback_adjust_bpf_dynptr_reg_off(struct bpf_dynptr *dynptr,
+ void *ctx)
+{
+ global_call_bpf_dynptr_data(dynptr += 1024);
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("dereference of modified dynptr_ptr ptr R1 off=16384 disallowed")
+int user_ringbuf_callback_const_ptr_to_dynptr_reg_off(void *ctx)
+{
+ bpf_user_ringbuf_drain(&user_ringbuf,
+ callback_adjust_bpf_dynptr_reg_off, NULL, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_success.c b/tools/testing/selftests/bpf/progs/user_ringbuf_success.c
new file mode 100644
index 000000000000..dd3bdf672633
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/user_ringbuf_success.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "test_user_ringbuf.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
+} user_ringbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+} kernel_ringbuf SEC(".maps");
+
+/* inputs */
+int pid, err, val;
+
+int read = 0;
+
+/* Counter used for end-to-end protocol test */
+__u64 kern_mutated = 0;
+__u64 user_mutated = 0;
+__u64 expected_user_mutated = 0;
+
+static int
+is_test_process(void)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+
+ return cur_pid == pid;
+}
+
+static long
+record_sample(struct bpf_dynptr *dynptr, void *context)
+{
+ const struct sample *sample = NULL;
+ struct sample stack_sample;
+ int status;
+ static int num_calls;
+
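+ /* alternate between copying the sample out via bpf_dynptr_read() and
+ * accessing it in place via bpf_dynptr_data()
+ */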
+ if (num_calls++ % 2 == 0) {
+ status = bpf_dynptr_read(&stack_sample, sizeof(stack_sample), dynptr, 0, 0);
+ if (status) {
+ bpf_printk("bpf_dynptr_read() failed: %d\n", status);
+ err = 1;
+ return 1;
+ }
+ } else {
+ sample = bpf_dynptr_data(dynptr, 0, sizeof(*sample));
+ if (!sample) {
+ bpf_printk("Unexpectedly failed to get sample\n");
+ err = 2;
+ return 1;
+ }
+ stack_sample = *sample;
+ }
+
+ __sync_fetch_and_add(&read, 1);
+ return 0;
+}
+
+static void
+handle_sample_msg(const struct test_msg *msg)
+{
+ switch (msg->msg_op) {
+ case TEST_MSG_OP_INC64:
+ kern_mutated += msg->operand_64;
+ break;
+ case TEST_MSG_OP_INC32:
+ kern_mutated += msg->operand_32;
+ break;
+ case TEST_MSG_OP_MUL64:
+ kern_mutated *= msg->operand_64;
+ break;
+ case TEST_MSG_OP_MUL32:
+ kern_mutated *= msg->operand_32;
+ break;
+ default:
+ bpf_printk("Unrecognized op %d\n", msg->msg_op);
+ err = 2;
+ }
+}
+
+static long
+read_protocol_msg(struct bpf_dynptr *dynptr, void *context)
+{
+ const struct test_msg *msg = NULL;
+
+ msg = bpf_dynptr_data(dynptr, 0, sizeof(*msg));
+ if (!msg) {
+ err = 1;
+ bpf_printk("Unexpectedly failed to get msg\n");
+ return 0;
+ }
+
+ handle_sample_msg(msg);
+
+ return 0;
+}
+
+static int publish_next_kern_msg(__u32 index, void *context)
+{
+ struct test_msg *msg = NULL;
+ int operand_64 = TEST_OP_64;
+ int operand_32 = TEST_OP_32;
+
+ msg = bpf_ringbuf_reserve(&kernel_ringbuf, sizeof(*msg), 0);
+ if (!msg) {
+ err = 4;
+ return 1;
+ }
+
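+ /* pick the op from the index and track its expected effect on the
+ * user-side counter
+ */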
+ switch (index % TEST_MSG_OP_NUM_OPS) {
+ case TEST_MSG_OP_INC64:
+ msg->operand_64 = operand_64;
+ msg->msg_op = TEST_MSG_OP_INC64;
+ expected_user_mutated += operand_64;
+ break;
+ case TEST_MSG_OP_INC32:
+ msg->operand_32 = operand_32;
+ msg->msg_op = TEST_MSG_OP_INC32;
+ expected_user_mutated += operand_32;
+ break;
+ case TEST_MSG_OP_MUL64:
+ msg->operand_64 = operand_64;
+ msg->msg_op = TEST_MSG_OP_MUL64;
+ expected_user_mutated *= operand_64;
+ break;
+ case TEST_MSG_OP_MUL32:
+ msg->operand_32 = operand_32;
+ msg->msg_op = TEST_MSG_OP_MUL32;
+ expected_user_mutated *= operand_32;
+ break;
+ default:
+ bpf_ringbuf_discard(msg, 0);
+ err = 5;
+ return 1;
+ }
+
+ bpf_ringbuf_submit(msg, 0);
+
+ return 0;
+}
+
+static void
+publish_kern_messages(void)
+{
+ if (expected_user_mutated != user_mutated) {
+ bpf_printk("%lu != %lu\n", expected_user_mutated, user_mutated);
+ err = 3;
+ return;
+ }
+
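+ /* Publish 8 messages to the kernel ringbuf for user space to drain. */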
+ bpf_loop(8, publish_next_kern_msg, NULL, 0);
+}
+
+SEC("fentry/" SYS_PREFIX "sys_prctl")
+int test_user_ringbuf_protocol(void *ctx)
+{
+ long status = 0;
+
+ if (!is_test_process())
+ return 0;
+
+ status = bpf_user_ringbuf_drain(&user_ringbuf, read_protocol_msg, NULL, 0);
+ if (status < 0) {
+ bpf_printk("Drain returned: %ld\n", status);
+ err = 1;
+ return 0;
+ }
+
+ publish_kern_messages();
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int test_user_ringbuf(void *ctx)
+{
+ if (!is_test_process())
+ return 0;
+
+ err = bpf_user_ringbuf_drain(&user_ringbuf, record_sample, NULL, 0);
+
+ return 0;
+}
+
+static long
+do_nothing_cb(struct bpf_dynptr *dynptr, void *context)
+{
+ __sync_fetch_and_add(&read, 1);
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_prlimit64")
+int test_user_ringbuf_epoll(void *ctx)
+{
+ long num_samples;
+
+ if (!is_test_process())
+ return 0;
+
+ num_samples = bpf_user_ringbuf_drain(&user_ringbuf, do_nothing_cb, NULL, 0);
+ if (num_samples <= 0)
+ err = 1;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_and.c b/tools/testing/selftests/bpf/progs/verifier_and.c
new file mode 100644
index 000000000000..2b4fdca162be
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_and.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/and.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct test_val);
+} map_hash_48b SEC(".maps");
+
+SEC("socket")
+__description("invalid and of negative number")
+__failure __msg("R0 max value is outside of the allowed memory range")
+__failure_unpriv
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void invalid_and_of_negative_number(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u8*)(r0 + 0); \
+ r1 &= -4; \
+ r1 <<= 2; \
+ r0 += r1; \
+l0_%=: r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid range check")
+__failure __msg("R0 max value is outside of the allowed memory range")
+__failure_unpriv
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void invalid_range_check(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u32*)(r0 + 0); \
+ r9 = 1; \
+ w1 %%= 2; \
+ w1 += 1; \
+ w9 &= w1; \
+ w9 += 1; \
+ w9 >>= 1; \
+ w3 = 1; \
+ w3 -= w9; \
+ w3 *= 0x10000000; \
+ r0 += r3; \
+ *(u32*)(r0 + 0) = r3; \
+l0_%=: r0 = r0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check known subreg with unknown reg")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if w0 < 0x1 goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R1 !read_ok'` */
+__xlated_unpriv("goto pc-1") /* `r1 = *(u32*)(r1 + 512)`, sanitized dead code */
+__xlated_unpriv("r0 = 0")
+#endif
+__naked void known_subreg_with_unknown_reg(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r0 <<= 32; \
+ r0 += 1; \
+ r0 &= 0xFFFF1234; \
+ /* Upper bits are unknown, but the AND above masks out the 1, zeroing the lower bits */\
+ if w0 < 1 goto l0_%=; \
+ r1 = *(u32*)(r1 + 512); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c
new file mode 100644
index 000000000000..7f4827eede3c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_arena.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+#include "bpf_arena_common.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 2); /* arena of two pages close to the 32-bit boundary */
+#ifdef __TARGET_ARCH_arm64
+ __ulong(map_extra, (1ull << 32) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
+#else
+ __ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
+#endif
+} arena SEC(".maps");
+
+SEC("syscall")
+__success __retval(0)
+int basic_alloc1(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ volatile int __arena *page1, *page2, *no_page, *page3;
+
+ page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!page1)
+ return 1;
+ *page1 = 1;
+ page2 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!page2)
+ return 2;
+ *page2 = 2;
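+ /* The arena has only two pages, so a third allocation must fail. */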
+ no_page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (no_page)
+ return 3;
+ if (*page1 != 1)
+ return 4;
+ if (*page2 != 2)
+ return 5;
+ bpf_arena_free_pages(&arena, (void __arena *)page2, 1);
+ if (*page1 != 1)
+ return 6;
+ if (*page2 != 0) /* use-after-free should return 0 */
+ return 7;
+ page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!page3)
+ return 8;
+ *page3 = 3;
+ if (page2 != page3)
+ return 9;
+ if (*page1 != 1)
+ return 10;
+#endif
+ return 0;
+}
+
+SEC("syscall")
+__success __retval(0)
+int basic_alloc2(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ volatile char __arena *page1, *page2, *page3, *page4;
+
+ page1 = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0);
+ if (!page1)
+ return 1;
+ page2 = page1 + __PAGE_SIZE;
+ page3 = page1 + __PAGE_SIZE * 2;
+ page4 = page1 - __PAGE_SIZE;
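+ /* page3 and page4 lie outside the two allocated pages: writes there should not stick and reads should return 0. */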
+ *page1 = 1;
+ *page2 = 2;
+ *page3 = 3;
+ *page4 = 4;
+ if (*page1 != 1)
+ return 1;
+ if (*page2 != 2)
+ return 2;
+ if (*page3 != 0)
+ return 3;
+ if (*page4 != 0)
+ return 4;
+ bpf_arena_free_pages(&arena, (void __arena *)page1, 2);
+ if (*page1 != 0)
+ return 5;
+ if (*page2 != 0)
+ return 6;
+ if (*page3 != 0)
+ return 7;
+ if (*page4 != 0)
+ return 8;
+#endif
+ return 0;
+}
+
+struct bpf_arena___l {
+ struct bpf_map map;
+} __attribute__((preserve_access_index));
+
+SEC("syscall")
+__success __retval(0) __log_level(2)
+int basic_alloc3(void *ctx)
+{
+ struct bpf_arena___l *ar = (struct bpf_arena___l *)&arena;
+ volatile char __arena *pages;
+
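+ /* Pass the arena via a CO-RE-relocated struct bpf_map pointer rather than the map address directly. */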
+ pages = bpf_arena_alloc_pages(&ar->map, NULL, ar->map.max_entries, NUMA_NO_NODE, 0);
+ if (!pages)
+ return 1;
+ return 0;
+}
+
+SEC("syscall")
+__success __retval(0)
+int basic_reserve1(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ char __arena *page;
+ int ret;
+
+ page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!page)
+ return 1;
+
+ page += __PAGE_SIZE;
+
+ /* Reserve the second page */
+ ret = bpf_arena_reserve_pages(&arena, page, 1);
+ if (ret)
+ return 2;
+
+ /* Try to explicitly allocate the reserved page. */
+ page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0);
+ if (page)
+ return 3;
+
+ /* Try to implicitly allocate the page (there are only two pages in the arena). */
+ page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (page)
+ return 4;
+#endif
+ return 0;
+}
+
+SEC("syscall")
+__success __retval(0)
+int basic_reserve2(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ char __arena *page;
+ int ret;
+
+ page = arena_base(&arena);
+ ret = bpf_arena_reserve_pages(&arena, page, 1);
+ if (ret)
+ return 1;
+
+ page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0);
+ if ((u64)page)
+ return 2;
+#endif
+ return 0;
+}
+
+/* Reserve the same page twice, should return -EBUSY. */
+SEC("syscall")
+__success __retval(0)
+int reserve_twice(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ char __arena *page;
+ int ret;
+
+ page = arena_base(&arena);
+
+ ret = bpf_arena_reserve_pages(&arena, page, 1);
+ if (ret)
+ return 1;
+
+ ret = bpf_arena_reserve_pages(&arena, page, 1);
+ if (ret != -EBUSY)
+ return 2;
+#endif
+ return 0;
+}
+
+/* Try to reserve past the end of the arena. */
+SEC("syscall")
+__success __retval(0)
+int reserve_invalid_region(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ char __arena *page;
+ int ret;
+
+ /* Try a NULL pointer. */
+ ret = bpf_arena_reserve_pages(&arena, NULL, 3);
+ if (ret != -EINVAL)
+ return 1;
+
+ page = arena_base(&arena);
+
+ ret = bpf_arena_reserve_pages(&arena, page, 3);
+ if (ret != -EINVAL)
+ return 2;
+
+ ret = bpf_arena_reserve_pages(&arena, page, 4096);
+ if (ret != -EINVAL)
+ return 3;
+
+ ret = bpf_arena_reserve_pages(&arena, page, (1ULL << 32) - 1);
+ if (ret != -EINVAL)
+ return 4;
+#endif
+ return 0;
+}
+
+SEC("iter.s/bpf_map")
+__success __log_level(2)
+int iter_maps1(struct bpf_iter__bpf_map *ctx)
+{
+ struct bpf_map *map = ctx->map;
+
+ if (!map)
+ return 0;
+ bpf_arena_alloc_pages(map, NULL, map->max_entries, 0, 0);
+ return 0;
+}
+
+SEC("iter.s/bpf_map")
+__failure __msg("expected pointer to STRUCT bpf_map")
+int iter_maps2(struct bpf_iter__bpf_map *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+
+ bpf_arena_alloc_pages((void *)seq, NULL, 1, 0, 0);
+ return 0;
+}
+
+SEC("iter.s/bpf_map")
+__failure __msg("untrusted_ptr_bpf_map")
+int iter_maps3(struct bpf_iter__bpf_map *ctx)
+{
+ struct bpf_map *map = ctx->map;
+
+ if (!map)
+ return 0;
+ bpf_arena_alloc_pages(map->inner_map_meta, NULL, map->max_entries, 0, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
new file mode 100644
index 000000000000..9dbdf123542d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+#include "bpf_arena_common.h"
+
+#define ARENA_SIZE (1ull << 32)
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, ARENA_SIZE / PAGE_SIZE);
+} arena SEC(".maps");
+
+SEC("syscall")
+__success __retval(0)
+int big_alloc1(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ volatile char __arena *page1, *page2, *no_page, *page3;
+ void __arena *base;
+
+ page1 = base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!page1)
+ return 1;
+ *page1 = 1;
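+ /* Allocate a page at a fixed address near the top of the 4GB range. */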
+ page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE * 2,
+ 1, NUMA_NO_NODE, 0);
+ if (!page2)
+ return 2;
+ *page2 = 2;
+ no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE,
+ 1, NUMA_NO_NODE, 0);
+ if (no_page)
+ return 3;
+ if (*page1 != 1)
+ return 4;
+ if (*page2 != 2)
+ return 5;
+ bpf_arena_free_pages(&arena, (void __arena *)page1, 1);
+ if (*page2 != 2)
+ return 6;
+ if (*page1 != 0) /* use-after-free should return 0 */
+ return 7;
+ page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!page3)
+ return 8;
+ *page3 = 3;
+ if (page1 != page3)
+ return 9;
+ if (*page2 != 2)
+ return 10;
+ if (*(page1 + PAGE_SIZE) != 0)
+ return 11;
+ if (*(page1 - PAGE_SIZE) != 0)
+ return 12;
+ if (*(page2 + PAGE_SIZE) != 0)
+ return 13;
+ if (*(page2 - PAGE_SIZE) != 0)
+ return 14;
+#endif
+ return 0;
+}
+
+/* Try to access a reserved page. Behavior should be identical to accessing unallocated pages. */
+SEC("syscall")
+__success __retval(0)
+int access_reserved(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ volatile char __arena *page;
+ char __arena *base;
+ const size_t len = 4;
+ int ret, i;
+
+ /* Get a separate region of the arena. */
+ page = base = arena_base(&arena) + 16384 * PAGE_SIZE;
+
+ ret = bpf_arena_reserve_pages(&arena, base, len);
+ if (ret)
+ return 1;
+
+ /* Try to dirty reserved memory. */
+ for (i = 0; i < len && can_loop; i++)
+ *page = 0x5a;
+
+ for (i = 0; i < len && can_loop; i++) {
+ page = (volatile char __arena *)(base + i * PAGE_SIZE);
+
+ /*
+ * Error out in case either the write went through,
+ * or the address has random garbage.
+ */
+ if (*page == 0x5a)
+ return 2 + 2 * i;
+
+ if (*page)
+ return 2 + 2 * i + 1;
+ }
+#endif
+ return 0;
+}
+
+/* Try to allocate a region overlapping with a reservation. */
+SEC("syscall")
+__success __retval(0)
+int request_partially_reserved(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ volatile char __arena *page;
+ char __arena *base;
+ int ret;
+
+ /* Add an arbitrary page offset. */
+ page = base = arena_base(&arena) + 4096 * __PAGE_SIZE;
+
+ ret = bpf_arena_reserve_pages(&arena, base + 3 * __PAGE_SIZE, 4);
+ if (ret)
+ return 1;
+
+ page = bpf_arena_alloc_pages(&arena, base, 5, NUMA_NO_NODE, 0);
+ if ((u64)page != 0ULL)
+ return 2;
+#endif
+ return 0;
+}
+
+SEC("syscall")
+__success __retval(0)
+int free_reserved(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ char __arena *addr;
+ char __arena *page;
+ int ret;
+
+ /* Add an arbitrary page offset. */
+ addr = arena_base(&arena) + 32768 * __PAGE_SIZE;
+
+ page = bpf_arena_alloc_pages(&arena, addr, 2, NUMA_NO_NODE, 0);
+ if (!page)
+ return 1;
+
+ ret = bpf_arena_reserve_pages(&arena, addr + 2 * __PAGE_SIZE, 2);
+ if (ret)
+ return 2;
+
+ /*
+ * Reserved and allocated pages should be interchangeable for
+ * bpf_arena_free_pages(). Free a reserved and an allocated
+ * page with a single call.
+ */
+ bpf_arena_free_pages(&arena, addr + __PAGE_SIZE, 2);
+
+ /* The free call above should have succeeded, so this allocation should too. */
+ page = bpf_arena_alloc_pages(&arena, addr + __PAGE_SIZE, 2, NUMA_NO_NODE, 0);
+ if (!page)
+ return 3;
+#endif
+ return 0;
+}
+
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+#define PAGE_CNT 100
+__u8 __arena * __arena page[PAGE_CNT]; /* occupies the first page */
+__u8 __arena *base;
+
+/*
+ * Check that arena's range_tree algorithm allocates pages sequentially
+ * on the first pass and then fills in all gaps on the second pass.
+ */
+__noinline int alloc_pages(int page_cnt, int pages_atonce, bool first_pass,
+ int max_idx, int step)
+{
+ __u8 __arena *pg;
+ int i, pg_idx;
+
+ for (i = 0; i < page_cnt; i++) {
+ pg = bpf_arena_alloc_pages(&arena, NULL, pages_atonce,
+ NUMA_NO_NODE, 0);
+ if (!pg)
+ return step;
+ pg_idx = (unsigned long) (pg - base) / PAGE_SIZE;
+ if (first_pass) {
+ /* Pages must be allocated sequentially */
+ if (pg_idx != i)
+ return step + 100;
+ } else {
+ /* Allocator must fill in the gaps */
+ if (pg_idx >= max_idx || (pg_idx & 1))
+ return step + 200;
+ }
+ *pg = pg_idx;
+ page[pg_idx] = pg;
+ cond_break;
+ }
+ return 0;
+}
+
+SEC("syscall")
+__success __retval(0)
+int big_alloc2(void *ctx)
+{
+ __u8 __arena *pg;
+ int i, err;
+
+ base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!base)
+ return 1;
+ bpf_arena_free_pages(&arena, (void __arena *)base, 1);
+
+ err = alloc_pages(PAGE_CNT, 1, true, PAGE_CNT, 2);
+ if (err)
+ return err;
+
+ /* Clear all even pages */
+ for (i = 0; i < PAGE_CNT; i += 2) {
+ pg = page[i];
+ if (*pg != i)
+ return 3;
+ bpf_arena_free_pages(&arena, (void __arena *)pg, 1);
+ page[i] = NULL;
+ cond_break;
+ }
+
+ /* Allocate into freed gaps */
+ err = alloc_pages(PAGE_CNT / 2, 1, false, PAGE_CNT, 4);
+ if (err)
+ return err;
+
+ /* Free pairs of pages */
+ for (i = 0; i < PAGE_CNT; i += 4) {
+ pg = page[i];
+ if (*pg != i)
+ return 5;
+ bpf_arena_free_pages(&arena, (void __arena *)pg, 2);
+ page[i] = NULL;
+ page[i + 1] = NULL;
+ cond_break;
+ }
+
+ /* Allocate 2 pages at a time into freed gaps */
+ err = alloc_pages(PAGE_CNT / 4, 2, false, PAGE_CNT, 6);
+ if (err)
+ return err;
+
+ /* Check pages without freeing */
+ for (i = 0; i < PAGE_CNT; i += 2) {
+ pg = page[i];
+ if (*pg != i)
+ return 7;
+ cond_break;
+ }
+
+ pg = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+
+ if (!pg)
+ return 8;
+ /*
+ * The first PAGE_CNT pages are occupied. The new page
+ * must be above.
+ */
+ if ((pg - base) / PAGE_SIZE < PAGE_CNT)
+ return 9;
+ return 0;
+}
+#endif
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_array_access.c b/tools/testing/selftests/bpf/progs/verifier_array_access.c
new file mode 100644
index 000000000000..0a187ff725cc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_array_access.c
@@ -0,0 +1,731 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/array_access.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct test_val);
+ __uint(map_flags, BPF_F_RDONLY_PROG);
+} map_array_ro SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct test_val);
+ __uint(map_flags, BPF_F_WRONLY_PROG);
+} map_array_wo SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, struct test_val);
+} map_array_pcpu SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, struct test_val);
+} map_array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct test_val);
+} map_hash_48b SEC(".maps");
+
+SEC("socket")
+__description("valid map access into an array with a constant")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr")
+__retval(0)
+__naked void an_array_with_a_constant_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("valid map access into an array with a register")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr")
+__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void an_array_with_a_register_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 4; \
+ r1 <<= 2; \
+ r0 += r1; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("valid map access into an array with a variable")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr")
+__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void an_array_with_a_variable_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u32*)(r0 + 0); \
+ if r1 >= %[max_entries] goto l0_%=; \
+ r1 <<= 2; \
+ r0 += r1; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(max_entries, MAX_ENTRIES),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("valid map access into an array with a signed variable")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr")
+__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void array_with_a_signed_variable(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u32*)(r0 + 0); \
+ if w1 s> 0xffffffff goto l1_%=; \
+ w1 = 0; \
+l1_%=: w2 = %[max_entries]; \
+ if r2 s> r1 goto l2_%=; \
+ w1 = 0; \
+l2_%=: w1 <<= 2; \
+ r0 += r1; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(max_entries, MAX_ENTRIES),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid map access into an array with a constant")
+__failure __msg("invalid access to map value, value_size=48 off=48 size=8")
+__failure_unpriv
+__naked void an_array_with_a_constant_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + %[__imm_0]) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(__imm_0, (MAX_ENTRIES + 1) << 2),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid map access into an array with a register")
+__failure __msg("R0 min value is outside of the allowed memory range")
+__failure_unpriv
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void an_array_with_a_register_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = %[__imm_0]; \
+ r1 <<= 2; \
+ r0 += r1; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(__imm_0, MAX_ENTRIES + 1),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid map access into an array with a variable")
+__failure
+__msg("R0 unbounded memory access, make sure to bounds check any such access")
+__failure_unpriv
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void an_array_with_a_variable_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u32*)(r0 + 0); \
+ r1 <<= 2; \
+ r0 += r1; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid map access into an array with no floor check")
+__failure __msg("R0 unbounded memory access")
+__failure_unpriv __msg_unpriv("R0 leaks addr")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void array_with_no_floor_check(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r0 + 0); \
+ w2 = %[max_entries]; \
+ if r2 s> r1 goto l1_%=; \
+ w1 = 0; \
+l1_%=: w1 <<= 2; \
+ r0 += r1; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(max_entries, MAX_ENTRIES),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid map access into an array with a invalid max check")
+__failure __msg("invalid access to map value, value_size=48 off=44 size=8")
+__failure_unpriv __msg_unpriv("R0 leaks addr")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void with_a_invalid_max_check_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u32*)(r0 + 0); \
+ w2 = %[__imm_0]; \
+ if r2 > r1 goto l1_%=; \
+ w1 = 0; \
+l1_%=: w1 <<= 2; \
+ r0 += r1; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(__imm_0, MAX_ENTRIES + 1),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid map access into an array with a invalid max check")
+__failure __msg("R0 pointer += pointer")
+__failure_unpriv
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void with_a_invalid_max_check_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r8 = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r0 += r8; \
+ r0 = *(u32*)(r0 + %[test_val_foo]); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("valid read map access into a read-only array 1")
+__success __success_unpriv __retval(28)
+__naked void a_read_only_array_1_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_ro] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r0 = *(u32*)(r0 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_ro)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("valid read map access into a read-only array 2")
+__success __retval(65507)
+__naked void a_read_only_array_2_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_ro] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = 4; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_csum_diff),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_ro)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid write map access into a read-only array 1")
+__failure __msg("write into map forbidden")
+__failure_unpriv
+__naked void a_read_only_array_1_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_ro] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 42; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_ro)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("invalid write map access into a read-only array 2")
+__failure __msg("write into map forbidden")
+__naked void a_read_only_array_2_2(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_ro] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r6; \
+ r2 = 0; \
+ r3 = r0; \
+ r4 = 8; \
+ call %[bpf_skb_load_bytes]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_skb_load_bytes),
+ __imm_addr(map_array_ro)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("valid write map access into a write-only array 1")
+__success __success_unpriv __retval(1)
+__naked void a_write_only_array_1_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_wo] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 42; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_wo)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("valid write map access into a write-only array 2")
+__success __retval(0)
+__naked void a_write_only_array_2_1(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_wo] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r6; \
+ r2 = 0; \
+ r3 = r0; \
+ r4 = 8; \
+ call %[bpf_skb_load_bytes]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_skb_load_bytes),
+ __imm_addr(map_array_wo)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid read map access into a write-only array 1")
+__failure __msg("read from map forbidden")
+__failure_unpriv
+__naked void a_write_only_array_1_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_wo] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r0 = *(u64*)(r0 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_wo)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("invalid read map access into a write-only array 2")
+__failure __msg("read from map forbidden")
+__naked void a_write_only_array_2_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_wo] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = 4; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_csum_diff),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_wo)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("valid map access into an array using constant without nullness")
+__success __retval(4) __log_level(2)
+__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}")
+unsigned int an_array_with_a_constant_no_nullness(void)
+{
+ /* Need 8-byte alignment for spill tracking */
+ __u32 __attribute__((aligned(8))) key = 1;
+ struct test_val *val;
+
+ val = bpf_map_lookup_elem(&map_array, &key);
+ val->index = offsetof(struct test_val, foo);
+
+ return val->index;
+}
+
+SEC("socket")
+__description("valid multiple map access into an array using constant without nullness")
+__success __retval(8) __log_level(2)
+__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -16) = {{(0|r[0-9])}}")
+__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}")
+unsigned int multiple_array_with_a_constant_no_nullness(void)
+{
+ __u32 __attribute__((aligned(8))) key = 1;
+ __u32 __attribute__((aligned(8))) key2 = 0;
+ struct test_val *val, *val2;
+
+ val = bpf_map_lookup_elem(&map_array, &key);
+ val->index = offsetof(struct test_val, foo);
+
+ val2 = bpf_map_lookup_elem(&map_array, &key2);
+ val2->index = offsetof(struct test_val, foo);
+
+ return val->index + val2->index;
+}
+
+SEC("socket")
+__description("valid map access into an array using natural aligned 32-bit constant 0 without nullness")
+__success __retval(4)
+unsigned int an_array_with_a_32bit_constant_0_no_nullness(void)
+{
+ /* Unlike the above tests, 32-bit zeroing is precisely tracked even
+ * if writes are not aligned to BPF_REG_SIZE. This tests that our
+ * STACK_ZERO handling works correctly.
+ */
+ struct test_val *val;
+ __u32 key = 0;
+
+ val = bpf_map_lookup_elem(&map_array, &key);
+ val->index = offsetof(struct test_val, foo);
+
+ return val->index;
+}
+
+SEC("socket")
+__description("valid map access into a pcpu array using constant without nullness")
+__success __retval(4) __log_level(2)
+__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}")
+unsigned int a_pcpu_array_with_a_constant_no_nullness(void)
+{
+ __u32 __attribute__((aligned(8))) key = 1;
+ struct test_val *val;
+
+ val = bpf_map_lookup_elem(&map_array_pcpu, &key);
+ val->index = offsetof(struct test_val, foo);
+
+ return val->index;
+}
+
+SEC("socket")
+__description("invalid map access into an array using constant without nullness")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+unsigned int an_array_with_a_constant_no_nullness_out_of_bounds(void)
+{
+ /* Out of bounds */
+ __u32 __attribute__((aligned(8))) key = 3;
+ struct test_val *val;
+
+ val = bpf_map_lookup_elem(&map_array, &key);
+ val->index = offsetof(struct test_val, foo);
+
+ return val->index;
+}
+
+SEC("socket")
+__description("invalid map access into an array using constant smaller than key_size")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+unsigned int an_array_with_a_constant_too_small(void)
+{
+ __u32 __attribute__((aligned(8))) key;
+ struct test_val *val;
+
+ /* Mark entire key as STACK_MISC */
+ bpf_probe_read_user(&key, sizeof(key), NULL);
+
+ /* Spilling only the bottom byte results in a tnum const of 1.
+ * We want to check that the verifier rejects it, as the spill is < 4B.
+ */
+ *(__u8 *)&key = 1;
+ val = bpf_map_lookup_elem(&map_array, &key);
+
+ /* Should fail, as verifier cannot prove in-bound lookup */
+ val->index = offsetof(struct test_val, foo);
+
+ return val->index;
+}
+
+SEC("socket")
+__description("invalid map access into an array using constant larger than key_size")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+unsigned int an_array_with_a_constant_too_big(void)
+{
+ struct test_val *val;
+ __u64 key = 1;
+
+ /* Even if the constant value is < max_entries, if the spill size is
+ * larger than the key size, the set bits may not be where we expect them
+ * to be on different endian architectures.
+ */
+ val = bpf_map_lookup_elem(&map_array, &key);
+ val->index = offsetof(struct test_val, foo);
+
+ return val->index;
+}
+
+SEC("socket")
+__description("invalid elided lookup using const and non-const key")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+unsigned int mixed_const_and_non_const_key_lookup(void)
+{
+ __u32 __attribute__((aligned(8))) key;
+ struct test_val *val;
+ __u32 rand;
+
+ rand = bpf_get_prandom_u32();
+ key = rand > 42 ? 1 : rand;
+ val = bpf_map_lookup_elem(&map_array, &key);
+
+ return val->index;
+}
+
+SEC("socket")
+__failure __msg("invalid read from stack R2 off=4096 size=4")
+__naked void key_lookup_at_invalid_fp(void)
+{
+ asm volatile (" \
+ r1 = %[map_array] ll; \
+ r2 = r10; \
+ r2 += 4096; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = *(u64*)(r0 + 0); \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array)
+ : __clobber_all);
+}
+
+volatile __u32 __attribute__((aligned(8))) global_key;
+
+SEC("socket")
+__description("invalid elided lookup using non-stack key")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+unsigned int non_stack_key_lookup(void)
+{
+ struct test_val *val;
+
+ global_key = 1;
+ val = bpf_map_lookup_elem(&map_array, (void *)&global_key);
+ val->index = offsetof(struct test_val, foo);
+
+ return val->index;
+}
+
+SEC("socket")
+__description("doesn't reject UINT64_MAX as s64 for irrelevant maps")
+__success __retval(42)
+unsigned int doesnt_reject_irrelevant_maps(void)
+{
+ __u64 key = 0xFFFFFFFFFFFFFFFF;
+ struct test_val *val;
+
+ val = bpf_map_lookup_elem(&map_hash_48b, &key);
+ if (val)
+ return val->index;
+
+ return 42;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_basic_stack.c b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c
new file mode 100644
index 000000000000..fb62e09f2114
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/basic_stack.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
+SEC("socket")
+__description("stack out of bounds")
+__failure __msg("invalid write to stack")
+__failure_unpriv
+__naked void stack_out_of_bounds(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 + 8) = r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("uninitialized stack1")
+__success __log_level(4) __msg("stack depth 8")
+__failure_unpriv __msg_unpriv("invalid read from stack")
+__naked void uninitialized_stack1(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("uninitialized stack2")
+__success __log_level(4) __msg("stack depth 8")
+__failure_unpriv __msg_unpriv("invalid read from stack")
+__naked void uninitialized_stack2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r0 = *(u64*)(r2 - 8); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("invalid fp arithmetic")
+__failure __msg("R1 subtraction from stack pointer")
+__failure_unpriv
+__naked void invalid_fp_arithmetic(void)
+{
+ /* If this gets ever changed, make sure JITs can deal with it. */
+ asm volatile (" \
+ r0 = 0; \
+ r1 = r10; \
+ r1 -= 8; \
+ *(u64*)(r1 + 0) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("non-invalid fp arithmetic")
+__success __success_unpriv __retval(0)
+__naked void non_invalid_fp_arithmetic(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ *(u64*)(r10 - 8) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("misaligned read from stack")
+__failure __msg("misaligned stack access")
+__failure_unpriv
+__naked void misaligned_read_from_stack(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r0 = *(u64*)(r2 - 4); \
+ exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_bitfield_write.c b/tools/testing/selftests/bpf/progs/verifier_bitfield_write.c
new file mode 100644
index 000000000000..623f130a3198
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_bitfield_write.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <stdint.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+#include "bpf_misc.h"
+
+struct core_reloc_bitfields {
+ /* unsigned bitfields */
+ uint8_t ub1: 1;
+ uint8_t ub2: 2;
+ uint32_t ub7: 7;
+ /* signed bitfields */
+ int8_t sb4: 4;
+ int32_t sb20: 20;
+ /* non-bitfields */
+ uint32_t u32;
+ int32_t s32;
+} __attribute__((preserve_access_index));
+
+SEC("tc")
+__description("single CO-RE bitfield roundtrip")
+__btf_path("btf__core_reloc_bitfields.bpf.o")
+__success
+__retval(3)
+int single_field_roundtrip(struct __sk_buff *ctx)
+{
+ struct core_reloc_bitfields bitfields;
+
+ __builtin_memset(&bitfields, 0, sizeof(bitfields));
+ BPF_CORE_WRITE_BITFIELD(&bitfields, ub2, 3);
+ return BPF_CORE_READ_BITFIELD(&bitfields, ub2);
+}
+
+SEC("tc")
+__description("multiple CO-RE bitfield roundtrip")
+__btf_path("btf__core_reloc_bitfields.bpf.o")
+__success
+__retval(0x3FD)
+int multiple_field_roundtrip(struct __sk_buff *ctx)
+{
+ struct core_reloc_bitfields bitfields;
+ uint8_t ub2;
+ int8_t sb4;
+
+ __builtin_memset(&bitfields, 0, sizeof(bitfields));
+ BPF_CORE_WRITE_BITFIELD(&bitfields, ub2, 1);
+ BPF_CORE_WRITE_BITFIELD(&bitfields, sb4, -1);
+
+ ub2 = BPF_CORE_READ_BITFIELD(&bitfields, ub2);
+ sb4 = BPF_CORE_READ_BITFIELD(&bitfields, sb4);
+
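+ /* sb4 == -1 reads back as 0xff, so the result is (0xff << 2) | 1 == 0x3FD. */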
+ return (((uint8_t)sb4) << 2) | ub2;
+}
+
+SEC("tc")
+__description("adjacent CO-RE bitfield roundtrip")
+__btf_path("btf__core_reloc_bitfields.bpf.o")
+__success
+__retval(7)
+int adjacent_field_roundtrip(struct __sk_buff *ctx)
+{
+ struct core_reloc_bitfields bitfields;
+ uint8_t ub1, ub2;
+
+ __builtin_memset(&bitfields, 0, sizeof(bitfields));
+ BPF_CORE_WRITE_BITFIELD(&bitfields, ub1, 1);
+ BPF_CORE_WRITE_BITFIELD(&bitfields, ub2, 3);
+
+ ub1 = BPF_CORE_READ_BITFIELD(&bitfields, ub1);
+ ub2 = BPF_CORE_READ_BITFIELD(&bitfields, ub2);
+
+ return (ub2 << 1) | ub1;
+}
+
+SEC("tc")
+__description("multibyte CO-RE bitfield roundtrip")
+__btf_path("btf__core_reloc_bitfields.bpf.o")
+__success
+__retval(0x21)
+int multibyte_field_roundtrip(struct __sk_buff *ctx)
+{
+ struct core_reloc_bitfields bitfields;
+ uint32_t ub7;
+ uint8_t ub1;
+
+ __builtin_memset(&bitfields, 0, sizeof(bitfields));
+ BPF_CORE_WRITE_BITFIELD(&bitfields, ub1, 1);
+ BPF_CORE_WRITE_BITFIELD(&bitfields, ub7, 16);
+
+ ub1 = BPF_CORE_READ_BITFIELD(&bitfields, ub1);
+ ub7 = BPF_CORE_READ_BITFIELD(&bitfields, ub7);
+
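+ /* (16 << 1) | 1 == 0x21 */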
+ return (ub7 << 1) | ub1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
new file mode 100644
index 000000000000..8bcddadfc4da
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Yafang Shao <laoar.shao@gmail.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include "bpf_misc.h"
+#include "task_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign,
+ u32 nr_bits) __ksym __weak;
+int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym __weak;
+void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym __weak;
+
+u64 bits_array[511] = {};
+
+SEC("iter.s/cgroup")
+__description("bits iter without destroy")
+__failure __msg("Unreleased reference")
+int BPF_PROG(no_destroy, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+{
+ struct bpf_iter_bits it;
+ u64 data = 1;
+
+ bpf_iter_bits_new(&it, &data, 1);
+ bpf_iter_bits_next(&it);
+ return 0;
+}
+
+SEC("iter/cgroup")
+__description("uninitialized iter in ->next()")
+__failure __msg("expected an initialized iter_bits as arg #0")
+int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+{
+ struct bpf_iter_bits it = {};
+
+ bpf_iter_bits_next(&it);
+ return 0;
+}
+
+SEC("iter/cgroup")
+__description("uninitialized iter in ->destroy()")
+__failure __msg("expected an initialized iter_bits as arg #0")
+int BPF_PROG(destroy_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+{
+ struct bpf_iter_bits it = {};
+
+ bpf_iter_bits_destroy(&it);
+ return 0;
+}
+
+SEC("syscall")
+__description("null pointer")
+__success __retval(0)
+int null_pointer(void)
+{
+ struct bpf_iter_bits iter;
+ int err, nr = 0;
+ int *bit;
+
+ err = bpf_iter_bits_new(&iter, NULL, 1);
+ bpf_iter_bits_destroy(&iter);
+ if (err != -EINVAL)
+ return 1;
+
+ bpf_for_each(bits, bit, NULL, 1)
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("bits copy")
+__success __retval(10)
+int bits_copy(void)
+{
+ u64 data = 0xf7310UL; /* 4 + 3 + 2 + 1 + 0 = 10 set bits */
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, &data, 1)
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("bits memalloc")
+__success __retval(64)
+int bits_memalloc(void)
+{
+ u64 data[2];
+ int nr = 0;
+ int *bit;
+
+ __builtin_memset(&data, 0xf0, sizeof(data)); /* 4 * 16 */
+ bpf_for_each(bits, bit, &data[0], ARRAY_SIZE(data))
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("bit index")
+__success __retval(8)
+int bit_index(void)
+{
+ u64 data = 0x100;
+ int bit_idx = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, &data, 1) {
+ if (*bit == 0)
+ continue;
+ bit_idx = *bit;
+ }
+ return bit_idx;
+}
+
+SEC("syscall")
+__description("bits too big")
+__success __retval(0)
+int bits_too_big(void)
+{
+ u64 data[4];
+ int nr = 0;
+ int *bit;
+
+ __builtin_memset(&data, 0xff, sizeof(data));
+ bpf_for_each(bits, bit, &data[0], 512) /* 512 words, greater than the 511-word limit */
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("fewer words")
+__success __retval(1)
+int fewer_words(void)
+{
+ u64 data[2] = {0x1, 0xff};
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, &data[0], 1)
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("zero words")
+__success __retval(0)
+int zero_words(void)
+{
+ u64 data[2] = {0x1, 0xff};
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, &data[0], 0)
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("huge words")
+__success __retval(0)
+int huge_words(void)
+{
+ u64 data[8] = {0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1};
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, &data[0], 67108865)
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("max words")
+__success __retval(4)
+int max_words(void)
+{
+ volatile int nr = 0;
+ int *bit;
+
+ bits_array[0] = (1ULL << 63) | 1U;
+ bits_array[510] = (1ULL << 33) | (1ULL << 32);
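+ /* Expect bit indices 0, 63, 32672 (510 * 64 + 32) and 32673 to be visited. */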
+
+ bpf_for_each(bits, bit, bits_array, 511) {
+ if (nr == 0 && *bit != 0)
+ break;
+ if (nr == 2 && *bit != 32672)
+ break;
+ nr++;
+ }
+ return nr;
+}
+
+SEC("syscall")
+__description("bad words")
+__success __retval(0)
+int bad_words(void)
+{
+ void *bad_addr = (void *)-4095;
+ struct bpf_iter_bits iter;
+ volatile int nr;
+ int *bit;
+ int err;
+
+ err = bpf_iter_bits_new(&iter, bad_addr, 1);
+ bpf_iter_bits_destroy(&iter);
+ if (err != -EFAULT)
+ return 1;
+
+ nr = 0;
+ bpf_for_each(bits, bit, bad_addr, 1)
+ nr++;
+ if (nr != 0)
+ return 2;
+
+ err = bpf_iter_bits_new(&iter, bad_addr, 4);
+ bpf_iter_bits_destroy(&iter);
+ if (err != -EFAULT)
+ return 3;
+
+ nr = 0;
+ bpf_for_each(bits, bit, bad_addr, 4)
+ nr++;
+ if (nr != 0)
+ return 4;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
new file mode 100644
index 000000000000..87a2c60d86e6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
@@ -0,0 +1,1671 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/bounds.c */
+
+#include <linux/bpf.h>
+#include <../../../include/linux/filter.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
+SEC("socket")
+__description("subtraction bounds (map value) variant 1")
+__failure __msg("R0 max value is outside of the allowed memory range")
+__failure_unpriv
+__naked void bounds_map_value_variant_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u8*)(r0 + 0); \
+ if r1 > 0xff goto l0_%=; \
+ r3 = *(u8*)(r0 + 1); \
+ if r3 > 0xff goto l0_%=; \
+ r1 -= r3; \
+ r1 >>= 56; \
+ r0 += r1; \
+ r0 = *(u8*)(r0 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("subtraction bounds (map value) variant 2")
+__failure
+__msg("R0 min value is negative, either use unsigned index or do a if (index >=0) check.")
+__msg_unpriv("R1 has unknown scalar with mixed signed bounds")
+__naked void bounds_map_value_variant_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u8*)(r0 + 0); \
+ if r1 > 0xff goto l0_%=; \
+ r3 = *(u8*)(r0 + 1); \
+ if r3 > 0xff goto l0_%=; \
+ r1 -= r3; \
+ r0 += r1; \
+ r0 = *(u8*)(r0 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check subtraction on pointers for unpriv")
+__success __failure_unpriv __msg_unpriv("R9 pointer -= pointer prohibited")
+__retval(0)
+__naked void subtraction_on_pointers_for_unpriv(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ r1 = %[map_hash_8b] ll; \
+ r2 = r10; \
+ r2 += -8; \
+ r6 = 9; \
+ *(u64*)(r2 + 0) = r6; \
+ call %[bpf_map_lookup_elem]; \
+ r9 = r10; \
+ r9 -= r0; \
+ r1 = %[map_hash_8b] ll; \
+ r2 = r10; \
+ r2 += -8; \
+ r6 = 0; \
+ *(u64*)(r2 + 0) = r6; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: *(u64*)(r0 + 0) = r9; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check based on zero-extended MOV")
+__success __success_unpriv __retval(0)
+__naked void based_on_zero_extended_mov(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ /* r2 = 0x0000'0000'ffff'ffff */ \
+ w2 = 0xffffffff; \
+ /* r2 = 0 */ \
+ r2 >>= 32; \
+ /* no-op */ \
+ r0 += r2; \
+ /* access at offset 0 */ \
+ r0 = *(u8*)(r0 + 0); \
+l0_%=: /* exit */ \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check based on sign-extended MOV. test1")
+__failure __msg("map_value pointer and 4294967295")
+__failure_unpriv
+__naked void on_sign_extended_mov_test1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ /* r2 = 0xffff'ffff'ffff'ffff */ \
+ r2 = 0xffffffff; \
+ /* r2 = 0xffff'ffff */ \
+ r2 >>= 32; \
+ /* r0 = <oob pointer> */ \
+ r0 += r2; \
+ /* access to OOB pointer */ \
+ r0 = *(u8*)(r0 + 0); \
+l0_%=: /* exit */ \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check based on sign-extended MOV. test2")
+__failure __msg("R0 min value is outside of the allowed memory range")
+__failure_unpriv
+__naked void on_sign_extended_mov_test2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ /* r2 = 0xffff'ffff'ffff'ffff */ \
+ r2 = 0xffffffff; \
+ /* r2 = 0xfff'ffff */ \
+ r2 >>= 36; \
+ /* r0 = <oob pointer> */ \
+ r0 += r2; \
+ /* access to OOB pointer */ \
+ r0 = *(u8*)(r0 + 0); \
+l0_%=: /* exit */ \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("bounds check based on reg_off + var_off + insn_off. test1")
+__failure __msg("value_size=8 off=1073741825")
+__naked void var_off_insn_off_test1(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_mark]); \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r6 &= 1; \
+ r6 += %[__imm_0]; \
+ r0 += r6; \
+ r0 += %[__imm_0]; \
+l0_%=: r0 = *(u8*)(r0 + 3); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(__imm_0, (1 << 29) - 1),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("bounds check based on reg_off + var_off + insn_off. test2")
+__failure __msg("value 1073741823")
+__naked void var_off_insn_off_test2(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_mark]); \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r6 &= 1; \
+ r6 += %[__imm_0]; \
+ r0 += r6; \
+ r0 += %[__imm_1]; \
+l0_%=: r0 = *(u8*)(r0 + 3); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(__imm_0, (1 << 30) - 1),
+ __imm_const(__imm_1, (1 << 29) - 1),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check after truncation of non-boundary-crossing range")
+__success __success_unpriv __retval(0)
+__naked void of_non_boundary_crossing_range(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ /* r1 = [0x00, 0xff] */ \
+ r1 = *(u8*)(r0 + 0); \
+ r2 = 1; \
+ /* r2 = 0x10'0000'0000 */ \
+ r2 <<= 36; \
+ /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */ \
+ r1 += r2; \
+ /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */ \
+ r1 += 0x7fffffff; \
+ /* r1 = [0x00, 0xff] */ \
+ w1 -= 0x7fffffff; \
+ /* r1 = 0 */ \
+ r1 >>= 8; \
+ /* no-op */ \
+ r0 += r1; \
+ /* access at offset 0 */ \
+ r0 = *(u8*)(r0 + 0); \
+l0_%=: /* exit */ \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check after truncation of boundary-crossing range (1)")
+__failure
+/* not actually fully unbounded, but the bound is very high */
+__msg("value -4294967168 makes map_value pointer be out of bounds")
+__failure_unpriv
+__naked void of_boundary_crossing_range_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ /* r1 = [0x00, 0xff] */ \
+ r1 = *(u8*)(r0 + 0); \
+ r1 += %[__imm_0]; \
+ /* r1 = [0xffff'ff80, 0x1'0000'007f] */ \
+ r1 += %[__imm_0]; \
+ /* r1 = [0xffff'ff80, 0xffff'ffff] or \
+ * [0x0000'0000, 0x0000'007f] \
+ */ \
+ w1 += 0; \
+ r1 -= %[__imm_0]; \
+ /* r1 = [0x00, 0xff] or \
+ * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\
+ */ \
+ r1 -= %[__imm_0]; \
+ /* error on OOB pointer computation */ \
+ r0 += r1; \
+ /* exit */ \
+ r0 = 0; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(__imm_0, 0xffffff80 >> 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check after truncation of boundary-crossing range (2)")
+__failure __msg("value -4294967168 makes map_value pointer be out of bounds")
+__failure_unpriv
+__naked void of_boundary_crossing_range_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ /* r1 = [0x00, 0xff] */ \
+ r1 = *(u8*)(r0 + 0); \
+ r1 += %[__imm_0]; \
+ /* r1 = [0xffff'ff80, 0x1'0000'007f] */ \
+ r1 += %[__imm_0]; \
+ /* r1 = [0xffff'ff80, 0xffff'ffff] or \
+ * [0x0000'0000, 0x0000'007f] \
+ * difference from the previous test: truncation via MOV32\
+ * instead of ALU32. \
+ */ \
+ w1 = w1; \
+ r1 -= %[__imm_0]; \
+ /* r1 = [0x00, 0xff] or \
+ * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\
+ */ \
+ r1 -= %[__imm_0]; \
+ /* error on OOB pointer computation */ \
+ r0 += r1; \
+ /* exit */ \
+ r0 = 0; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(__imm_0, 0xffffff80 >> 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check after wrapping 32-bit addition")
+__success __success_unpriv __retval(0)
+__naked void after_wrapping_32_bit_addition(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ /* r1 = 0x7fff'ffff */ \
+ r1 = 0x7fffffff; \
+ /* r1 = 0xffff'fffe */ \
+ r1 += 0x7fffffff; \
+ /* r1 = 0 */ \
+ w1 += 2; \
+ /* no-op */ \
+ r0 += r1; \
+ /* access at offset 0 */ \
+ r0 = *(u8*)(r0 + 0); \
+l0_%=: /* exit */ \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check after shift with oversized count operand")
+__failure __msg("R0 max value is outside of the allowed memory range")
+__failure_unpriv
+__naked void shift_with_oversized_count_operand(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r2 = 32; \
+ r1 = 1; \
+ /* r1 = (u32)1 << (u32)32 = ? */ \
+ w1 <<= w2; \
+ /* r1 = [0x0000, 0xffff] */ \
+ r1 &= 0xffff; \
+ /* computes unknown pointer, potentially OOB */ \
+ r0 += r1; \
+ /* potentially OOB access */ \
+ r0 = *(u8*)(r0 + 0); \
+l0_%=: /* exit */ \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check after right shift of maybe-negative number")
+__failure __msg("R0 unbounded memory access")
+__failure_unpriv
+__naked void shift_of_maybe_negative_number(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ /* r1 = [0x00, 0xff] */ \
+ r1 = *(u8*)(r0 + 0); \
+ /* r1 = [-0x01, 0xfe] */ \
+ r1 -= 1; \
+ /* r1 = 0 or 0xff'ffff'ffff'ffff */ \
+ r1 >>= 8; \
+ /* r1 = 0 or 0xffff'ffff'ffff */ \
+ r1 >>= 8; \
+ /* computes unknown pointer, potentially OOB */ \
+ r0 += r1; \
+ /* potentially OOB access */ \
+ r0 = *(u8*)(r0 + 0); \
+l0_%=: /* exit */ \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check after 32-bit right shift with 64-bit input")
+__failure __msg("math between map_value pointer and 4294967294 is not allowed")
+__failure_unpriv
+__naked void shift_with_64_bit_input(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 2; \
+ /* r1 = 1<<32 */ \
+ r1 <<= 31; \
+ /* r1 = 0 (NOT 2!) */ \
+ w1 >>= 31; \
+ /* r1 = 0xffff'fffe (NOT 0!) */ \
+ w1 -= 2; \
+ /* error on computing OOB pointer */ \
+ r0 += r1; \
+ /* exit */ \
+ r0 = 0; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check map access with off+size signed 32bit overflow. test1")
+__failure __msg("map_value pointer and 2147483646")
+__failure_unpriv
+__naked void size_signed_32bit_overflow_test1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r0 += 0x7ffffffe; \
+ r0 = *(u64*)(r0 + 0); \
+ goto l1_%=; \
+l1_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check map access with off+size signed 32bit overflow. test2")
+__failure __msg("pointer offset 1073741822")
+__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
+__naked void size_signed_32bit_overflow_test2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r0 += 0x1fffffff; \
+ r0 += 0x1fffffff; \
+ r0 += 0x1fffffff; \
+ r0 = *(u64*)(r0 + 0); \
+ goto l1_%=; \
+l1_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check map access with off+size signed 32bit overflow. test3")
+__failure __msg("pointer offset -1073741822")
+__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
+__naked void size_signed_32bit_overflow_test3(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r0 -= 0x1fffffff; \
+ r0 -= 0x1fffffff; \
+ r0 = *(u64*)(r0 + 2); \
+ goto l1_%=; \
+l1_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check map access with off+size signed 32bit overflow. test4")
+__failure __msg("map_value pointer and 1000000000000")
+__failure_unpriv
+__naked void size_signed_32bit_overflow_test4(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r1 = 1000000; \
+ r1 *= 1000000; \
+ r0 += r1; \
+ r0 = *(u64*)(r0 + 2); \
+ goto l1_%=; \
+l1_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check mixed 32bit and 64bit arithmetic. test1")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("exit")
+#endif
+__naked void _32bit_and_64bit_arithmetic_test1(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ r1 = -1; \
+ r1 <<= 32; \
+ r1 += 1; \
+ /* r1 = 0xffffFFFF00000001 */ \
+ if w1 > 1 goto l0_%=; \
+ /* check ALU64 op keeps 32bit bounds */ \
+ r1 += 1; \
+ if w1 > 2 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: /* invalid ldx if bounds are lost above */ \
+ r0 = *(u64*)(r0 - 1); \
+l1_%=: exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check mixed 32bit and 64bit arithmetic. test2")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("exit")
+#endif
+__naked void _32bit_and_64bit_arithmetic_test2(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ r1 = -1; \
+ r1 <<= 32; \
+ r1 += 1; \
+ /* r1 = 0xffffFFFF00000001 */ \
+ r2 = 3; \
+ /* r1 = 0x2 */ \
+ w1 += 1; \
+ /* check ALU32 op zero extends 64bit bounds */ \
+ if r1 > r2 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: /* invalid ldx if bounds are lost above */ \
+ r0 = *(u64*)(r0 - 1); \
+l1_%=: exit; \
+" ::: __clobber_all);
+}
+
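+/* The packet access below is only safe if the bounds of the constant zero in
+ * w9 survive the 32-bit register-to-register move into w2 and are carried
+ * over to the 64-bit bounds used when r2 is added to the packet pointer.
+ */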
+SEC("tc")
+__description("assigning 32bit bounds to 64bit for wA = 0, wB = wA")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void for_wa_0_wb_wa(void)
+{
+ asm volatile (" \
+ r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_data]); \
+ w9 = 0; \
+ w2 = w9; \
+ r6 = r7; \
+ r6 += r2; \
+ r3 = r6; \
+ r3 += 8; \
+ if r3 > r8 goto l0_%=; \
+ r5 = *(u32*)(r6 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
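+/* The BPF_XOR tests below load 8 bytes at offset 8 of the 8-byte map_hash_8b
+ * value.  That access is only tolerated when the bounds derived for the xor
+ * result prove that the guarding branch always jumps over the load; the
+ * "reg = any" variants leave the load reachable and must be rejected with
+ * "invalid access to map value".
+ */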
+SEC("socket")
+__description("bounds check for reg = 0, reg xor 1")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if r1 != 0x0 goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("r0 = 0")
+#endif
+__naked void reg_0_reg_xor_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r1 = 0; \
+ r1 ^= 1; \
+ if r1 != 0 goto l1_%=; \
+ r0 = *(u64*)(r0 + 8); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check for reg32 = 0, reg32 xor 1")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if w1 != 0x0 goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("r0 = 0")
+#endif
+__naked void reg32_0_reg32_xor_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: w1 = 0; \
+ w1 ^= 1; \
+ if w1 != 0 goto l1_%=; \
+ r0 = *(u64*)(r0 + 8); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check for reg = 2, reg xor 3")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if r1 > 0x0 goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("r0 = 0")
+#endif
+__naked void reg_2_reg_xor_3(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r1 = 2; \
+ r1 ^= 3; \
+ if r1 > 0 goto l1_%=; \
+ r0 = *(u64*)(r0 + 8); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check for reg = any, reg xor 3")
+__failure __msg("invalid access to map value")
+__msg_unpriv("invalid access to map value")
+__naked void reg_any_reg_xor_3(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r1 = *(u64*)(r0 + 0); \
+ r1 ^= 3; \
+ if r1 != 0 goto l1_%=; \
+ r0 = *(u64*)(r0 + 8); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check for reg32 = any, reg32 xor 3")
+__failure __msg("invalid access to map value")
+__msg_unpriv("invalid access to map value")
+__naked void reg32_any_reg32_xor_3(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r1 = *(u64*)(r0 + 0); \
+ w1 ^= 3; \
+ if w1 != 0 goto l1_%=; \
+ r0 = *(u64*)(r0 + 8); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check for reg > 0, reg xor 3")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if r1 >= 0x0 goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("r0 = 0")
+#endif
+__naked void reg_0_reg_xor_3(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r1 = *(u64*)(r0 + 0); \
+ if r1 <= 0 goto l1_%=; \
+ r1 ^= 3; \
+ if r1 >= 0 goto l1_%=; \
+ r0 = *(u64*)(r0 + 8); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check for reg32 > 0, reg32 xor 3")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if w1 >= 0x0 goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("r0 = 0")
+#endif
+__naked void reg32_0_reg32_xor_3(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r1 = *(u64*)(r0 + 0); \
+ if w1 <= 0 goto l1_%=; \
+ w1 ^= 3; \
+ if w1 >= 0 goto l1_%=; \
+ r0 = *(u64*)(r0 + 8); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check for non const xor src dst")
+__success __log_level(2)
+__msg("5: (af) r0 ^= r6 ; R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=431,var_off=(0x0; 0x1af))")
+__naked void non_const_xor_src_dst(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r6 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r6 &= 0xaf; \
+ r0 &= 0x1a0; \
+ r0 ^= r6; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check for non const or src dst")
+__success __log_level(2)
+__msg("5: (4f) r0 |= r6 ; R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=431,var_off=(0x0; 0x1af))")
+__naked void non_const_or_src_dst(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r6 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r6 &= 0xaf; \
+ r0 &= 0x1a0; \
+ r0 |= r6; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds check for non const mul regs")
+__success __log_level(2)
+__msg("5: (2f) r0 *= r6 ; R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=3825,var_off=(0x0; 0xfff))")
+__naked void non_const_mul_regs(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r6 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r6 &= 0xff; \
+ r0 &= 0x0f; \
+ r0 *= r6; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks after 32-bit truncation. test 1")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr")
+__retval(0)
+__naked void _32_bit_truncation_test_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u32*)(r0 + 0); \
+ /* This used to reduce the max bound to 0x7fffffff */\
+ if r1 == 0 goto l1_%=; \
+ if r1 > 0x7fffffff goto l0_%=; \
+l1_%=: r0 = 0; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks after 32-bit truncation. test 2")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr")
+__retval(0)
+__naked void _32_bit_truncation_test_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u32*)(r0 + 0); \
+ if r1 s< 1 goto l1_%=; \
+ if w1 s< 0 goto l0_%=; \
+l1_%=: r0 = 0; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
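+/* The XDP tests below loop on a counter whose exit condition compares it
+ * against a range derived from a packet byte.  The ranges are picked to cross
+ * the 64-bit or 32-bit signed boundary (or to exceed U32_MAX), and the
+ * programs only verify if the JLT/JSLT bounds deduction can still bound the
+ * counter across the loop.
+ */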
+SEC("xdp")
+__description("bound check with JMP_JLT for crossing 64-bit signed boundary")
+__success __retval(0)
+__naked void crossing_64_bit_signed_boundary_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 1; \
+ if r1 > r3 goto l0_%=; \
+ r1 = *(u8*)(r2 + 0); \
+ r0 = 0x7fffffffffffff10 ll; \
+ r1 += r0; \
+ r0 = 0x8000000000000000 ll; \
+l1_%=: r0 += 1; \
+ /* r1 unsigned range is [0x7fffffffffffff10, 0x800000000000000f] */\
+ if r0 < r1 goto l1_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("bound check with JMP_JSLT for crossing 64-bit signed boundary")
+__success __retval(0)
+__flag(BPF_F_TEST_REG_INVARIANTS)
+__naked void crossing_64_bit_signed_boundary_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 1; \
+ if r1 > r3 goto l0_%=; \
+ r1 = *(u8*)(r2 + 0); \
+ r0 = 0x7fffffffffffff10 ll; \
+ r1 += r0; \
+ r2 = 0x8000000000000fff ll; \
+ r0 = 0x8000000000000000 ll; \
+l1_%=: r0 += 1; \
+ if r0 s> r2 goto l0_%=; \
+ /* r1 signed range is [S64_MIN, S64_MAX] */ \
+ if r0 s< r1 goto l1_%=; \
+ r0 = 1; \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("bound check for loop upper bound greater than U32_MAX")
+__success __retval(0)
+__naked void bound_greater_than_u32_max(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 1; \
+ if r1 > r3 goto l0_%=; \
+ r1 = *(u8*)(r2 + 0); \
+ r0 = 0x100000000 ll; \
+ r1 += r0; \
+ r0 = 0x100000000 ll; \
+l1_%=: r0 += 1; \
+ if r0 < r1 goto l1_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("bound check with JMP32_JLT for crossing 32-bit signed boundary")
+__success __retval(0)
+__naked void crossing_32_bit_signed_boundary_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 1; \
+ if r1 > r3 goto l0_%=; \
+ r1 = *(u8*)(r2 + 0); \
+ w0 = 0x7fffff10; \
+ w1 += w0; \
+ w0 = 0x80000000; \
+l1_%=: w0 += 1; \
+	/* r1 unsigned range is [0x7fffff10, 0x8000000f] */	\
+ if w0 < w1 goto l1_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("bound check with JMP32_JSLT for crossing 32-bit signed boundary")
+__success __retval(0)
+__flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */
+__naked void crossing_32_bit_signed_boundary_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 1; \
+ if r1 > r3 goto l0_%=; \
+ r1 = *(u8*)(r2 + 0); \
+ w0 = 0x7fffff10; \
+ w1 += w0; \
+ w2 = 0x80000fff; \
+ w0 = 0x80000000; \
+l1_%=: w0 += 1; \
+ if w0 s> w2 goto l0_%=; \
+ /* r1 signed range is [S32_MIN, S32_MAX] */ \
+ if w0 s< w1 goto l1_%=; \
+ r0 = 1; \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("bounds check with JMP_NE for reg edge")
+__success __retval(0)
+__naked void reg_not_equal_const(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ call %[bpf_get_prandom_u32]; \
+ r4 = r0; \
+ r4 &= 7; \
+ if r4 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: r1 = r6; \
+ r2 = 0; \
+ r3 = r10; \
+ r3 += -8; \
+ r5 = 0; \
+	/* The 4th argument of bpf_skb_store_bytes is defined as \
+	 * ARG_CONST_SIZE, so 0 is not allowed. The 'r4 != 0' \
+	 * check above excludes zero from the initial [0, 7] \
+	 * range. \
+	 */ \
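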
+ call %[bpf_skb_store_bytes]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_skb_store_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("bounds check with JMP_EQ for reg edge")
+__success __retval(0)
+__naked void reg_equal_const(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ call %[bpf_get_prandom_u32]; \
+ r4 = r0; \
+ r4 &= 7; \
+ if r4 == 0 goto l0_%=; \
+ r1 = r6; \
+ r2 = 0; \
+ r3 = r10; \
+ r3 += -8; \
+ r5 = 0; \
+ /* Just the same as what we do in reg_not_equal_const() */ \
+ call %[bpf_skb_store_bytes]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_skb_store_bytes)
+ : __clobber_all);
+}
+
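+/* The multiplication tests below run at log_level 2 and match the expected
+ * scalar bounds of the destination register in the verifier log: bounds stay
+ * precise when the operand ranges cannot make the product overflow
+ * (mixed-sign, negative and non-sign-crossing cases) and collapse to an
+ * unbounded scalar() when overflow is possible.
+ */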
+SEC("tc")
+__description("multiply mixed sign bounds. test 1")
+__success __log_level(2)
+__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=0x1bc16d5cd4927ee1,smax=umax=0x1bc16d674ec80000,smax32=0x7ffffeff,umax32=0xfffffeff,var_off=(0x1bc16d4000000000; 0x3ffffffeff))")
+__naked void mult_mixed0_sign(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r6 &= 0xf;"
+ "r6 -= 1000000000;"
+ "r7 &= 0xf;"
+ "r7 -= 2000000000;"
+ "r6 *= r7;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_skb_store_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("multiply mixed sign bounds. test 2")
+__success __log_level(2)
+__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=smin32=-100,smax=smax32=200)")
+__naked void mult_mixed1_sign(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r6 &= 0xf;"
+ "r6 -= 0xa;"
+ "r7 &= 0xf;"
+ "r7 -= 0x14;"
+ "r6 *= r7;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_skb_store_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("multiply negative bounds")
+__success __log_level(2)
+__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=smin32=umin32=0x3ff280b0,smax=umax=smax32=umax32=0x3fff0001,var_off=(0x3ff00000; 0xf81ff))")
+__naked void mult_sign_bounds(void)
+{
+ asm volatile (
+ "r8 = 0x7fff;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r6 &= 0xa;"
+ "r6 -= r8;"
+ "r7 &= 0xf;"
+ "r7 -= r8;"
+ "r6 *= r7;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_skb_store_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("multiply bounds that don't cross signed boundary")
+__success __log_level(2)
+__msg("r8 *= r6 {{.*}}; R6_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=11,var_off=(0x0; 0xb)) R8_w=scalar(smin=0,smax=umax=0x7b96bb0a94a3a7cd,var_off=(0x0; 0x7fffffffffffffff))")
+__naked void mult_no_sign_crossing(void)
+{
+ asm volatile (
+ "r6 = 0xb;"
+ "r8 = 0xb3c3f8c99262687 ll;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r6 &= r7;"
+ "r8 *= r6;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_skb_store_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("multiplication overflow, result in unbounded reg. test 1")
+__success __log_level(2)
+__msg("r6 *= r7 {{.*}}; R6_w=scalar()")
+__naked void mult_unsign_ovf(void)
+{
+ asm volatile (
+ "r8 = 0x7ffffffffff ll;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r6 &= 0x7fffffff;"
+ "r7 &= r8;"
+ "r6 *= r7;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_skb_store_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("multiplication overflow, result in unbounded reg. test 2")
+__success __log_level(2)
+__msg("r6 *= r7 {{.*}}; R6_w=scalar()")
+__naked void mult_sign_ovf(void)
+{
+ asm volatile (
+ "r8 = 0x7ffffffff ll;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r6 &= 0xa;"
+ "r6 -= r8;"
+ "r7 &= 0x7fffffff;"
+ "r6 *= r7;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_skb_store_bytes)
+ : __clobber_all);
+}
+
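+/* The addition and subtraction tests below check the bounds kept after a
+ * possibly wrapping ALU operation: when every value in the input range
+ * overflows (or underflows), the wrapped result range is still well defined
+ * and is preserved; when only part of the range wraps, the result collapses
+ * to unknown (scalar() for 64-bit ops, the full u32 range for 32-bit ops).
+ */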
+SEC("socket")
+__description("64-bit addition, all outcomes overflow")
+__success __log_level(2)
+__msg("5: (0f) r3 += r3 {{.*}} R3_w=scalar(umin=0x4000000000000000,umax=0xfffffffffffffffe)")
+__retval(0)
+__naked void add64_full_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r4 = r0;"
+ "r3 = 0xa000000000000000 ll;"
+ "r3 |= r4;"
+ "r3 += r3;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("64-bit addition, partial overflow, result in unbounded reg")
+__success __log_level(2)
+__msg("4: (0f) r3 += r3 {{.*}} R3_w=scalar()")
+__retval(0)
+__naked void add64_partial_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r4 = r0;"
+ "r3 = 2;"
+ "r3 |= r4;"
+ "r3 += r3;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit addition overflow, all outcomes overflow")
+__success __log_level(2)
+__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=umin=umin32=0x40000000,smax=umax=umax32=0xfffffffe,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void add32_full_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "w4 = w0;"
+ "w3 = 0xa0000000;"
+ "w3 |= w4;"
+ "w3 += w3;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit addition, partial overflow, result in unbounded u32 bounds")
+__success __log_level(2)
+__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void add32_partial_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "w4 = w0;"
+ "w3 = 2;"
+ "w3 |= w4;"
+ "w3 += w3;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("64-bit subtraction, all outcomes underflow")
+__success __log_level(2)
+__msg("6: (1f) r3 -= r1 {{.*}} R3_w=scalar(umin=1,umax=0x8000000000000000)")
+__retval(0)
+__naked void sub64_full_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r1 = r0;"
+ "r2 = 0x8000000000000000 ll;"
+ "r1 |= r2;"
+ "r3 = 0;"
+ "r3 -= r1;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("64-bit subtraction, partial overflow, result in unbounded reg")
+__success __log_level(2)
+__msg("3: (1f) r3 -= r2 {{.*}} R3_w=scalar()")
+__retval(0)
+__naked void sub64_partial_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r3 = r0;"
+ "r2 = 1;"
+ "r3 -= r2;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit subtraction overflow, all outcomes underflow")
+__success __log_level(2)
+__msg("5: (1c) w3 -= w1 {{.*}} R3_w=scalar(smin=umin=umin32=1,smax=umax=umax32=0x80000000,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void sub32_full_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "w1 = w0;"
+ "w2 = 0x80000000;"
+ "w1 |= w2;"
+ "w3 = 0;"
+ "w3 -= w1;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit subtraction, partial overflow, result in unbounded u32 bounds")
+__success __log_level(2)
+__msg("3: (1c) w3 -= w2 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void sub32_partial_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "w3 = w0;"
+ "w2 = 1;"
+ "w3 -= w2;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("dead branch on jset, does not result in invariants violation error")
+__success __log_level(2)
+__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS)
+__naked void jset_range_analysis(void)
+{
+ asm volatile (" \
+ call %[bpf_get_netns_cookie]; \
+ if r0 == 0 goto l0_%=; \
+ if r0 & 0xffffffff goto +0; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_netns_cookie)
+ : __clobber_all);
+}
+
+/* This test covers the bounds deduction on 64bits when the s64 and u64 ranges
+ * overlap on the negative side. At instruction 7, the ranges look as follows:
+ *
+ * 0 umin=0xfffffcf1 umax=0xff..ff6e U64_MAX
+ * | [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] |
+ * |----------------------------|------------------------------|
+ * |xxxxxxxxxx] [xxxxxxxxxxxx|
+ * 0 smax=0xeffffeee smin=-655 -1
+ *
+ * We should therefore deduce the following new bounds:
+ *
+ * 0 u64=[0xff..ffd71;0xff..ff6e] U64_MAX
+ * | [xxx] |
+ * |----------------------------|------------------------------|
+ * | [xxx] |
+ * 0 s64=[-655;-146] -1
+ *
+ * Without deduction across the sign boundary, we end up with an invariant
+ * violation error.
+ */
+SEC("socket")
+__description("bounds deduction cross sign boundary, negative overlap")
+__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS)
+__msg("7: (1f) r0 -= r6 {{.*}} R0=scalar(smin=smin32=-655,smax=smax32=-146,umin=0xfffffffffffffd71,umax=0xffffffffffffff6e,umin32=0xfffffd71,umax32=0xffffff6e,var_off=(0xfffffffffffffc00; 0x3ff))")
+__retval(0)
+__naked void bounds_deduct_negative_overlap(void)
+{
+ asm volatile(" \
+ call %[bpf_get_prandom_u32]; \
+ w3 = w0; \
+ w6 = (s8)w0; \
+ r0 = (s8)r0; \
+ if w6 >= 0xf0000000 goto l0_%=; \
+ r0 += r6; \
+ r6 += 400; \
+ r0 -= r6; \
+ if r3 < r0 goto l0_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/* This test covers the bounds deduction on 64bits when the s64 and u64 ranges
+ * overlap on the positive side. At instruction 3, the ranges look as follows:
+ *
+ * 0 umin=0 umax=0xffffffffffffff00 U64_MAX
+ * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] |
+ * |----------------------------|------------------------------|
+ * |xxxxxxxx] [xxxxxxxx|
+ * 0 smax=127 smin=-128 -1
+ *
+ * We should therefore deduce the following new bounds:
+ *
+ * 0 u64=[0;127] U64_MAX
+ * [xxxxxxxx] |
+ * |----------------------------|------------------------------|
+ * [xxxxxxxx] |
+ * 0 s64=[0;127] -1
+ *
+ * Without deduction across the sign boundary, the program is rejected due to
+ * the frame pointer write.
+ */
+SEC("socket")
+__description("bounds deduction cross sign boundary, positive overlap")
+__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS)
+__msg("3: (2d) if r0 > r1 {{.*}} R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=127,var_off=(0x0; 0x7f))")
+__retval(0)
+__naked void bounds_deduct_positive_overlap(void)
+{
+ asm volatile(" \
+ call %[bpf_get_prandom_u32]; \
+ r0 = (s8)r0; \
+ r1 = 0xffffffffffffff00; \
+ if r0 > r1 goto l0_%=; \
+ if r0 < 128 goto l0_%=; \
+ r10 = 0; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/* This test is the same as above, but the s64 and u64 ranges overlap in two
+ * places. At instruction 3, the ranges look as follows:
+ *
+ * 0 umin=0 umax=0xffffffffffffff80 U64_MAX
+ * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] |
+ * |----------------------------|------------------------------|
+ * |xxxxxxxx] [xxxxxxxx|
+ * 0 smax=127 smin=-128 -1
+ *
+ * 0xffffffffffffff80 = (u64)-128. We therefore can't deduce anything new and
+ * the program should fail due to the frame pointer write.
+ */
+SEC("socket")
+__description("bounds deduction cross sign boundary, two overlaps")
+__failure __flag(BPF_F_TEST_REG_INVARIANTS)
+__msg("3: (2d) if r0 > r1 {{.*}} R0_w=scalar(smin=smin32=-128,smax=smax32=127,umax=0xffffffffffffff80)")
+__msg("frame pointer is read only")
+__naked void bounds_deduct_two_overlaps(void)
+{
+ asm volatile(" \
+ call %[bpf_get_prandom_u32]; \
+ r0 = (s8)r0; \
+ r1 = 0xffffffffffffff80; \
+ if r0 > r1 goto l0_%=; \
+ if r0 < 128 goto l0_%=; \
+ r10 = 0; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c
new file mode 100644
index 000000000000..260a6df264e3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/bounds_deduction.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
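+/* The tests below derive bounds for r0 from signed comparisons against
+ * constants and then use r0 in ALU operations involving the context pointer.
+ * The expected messages check that the verifier rejects whatever remains
+ * invalid for the deduced bounds (subtracting a pointer from a scalar,
+ * dereferencing a modified or negative-offset ctx pointer), while
+ * unprivileged programs are additionally flagged for the pointer ALU itself.
+ */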
+SEC("socket")
+__description("check deducing bounds from const, 1")
+__failure __msg("R0 tried to subtract pointer from scalar")
+__msg_unpriv("R1 has pointer with unsupported alu operation")
+__naked void deducing_bounds_from_const_1(void)
+{
+ asm volatile (" \
+ r0 = 1; \
+ if r0 s>= 1 goto l0_%=; \
+l0_%=: r0 -= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from const, 2")
+__success __failure_unpriv
+__msg_unpriv("R1 has pointer with unsupported alu operation")
+__retval(1)
+__naked void deducing_bounds_from_const_2(void)
+{
+ asm volatile (" \
+ r0 = 1; \
+ if r0 s>= 1 goto l0_%=; \
+ exit; \
+l0_%=: if r0 s<= 1 goto l1_%=; \
+ exit; \
+l1_%=: r1 -= r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from const, 3")
+__failure __msg("R0 tried to subtract pointer from scalar")
+__msg_unpriv("R1 has pointer with unsupported alu operation")
+__naked void deducing_bounds_from_const_3(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ if r0 s<= 0 goto l0_%=; \
+l0_%=: r0 -= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from const, 4")
+__success __failure_unpriv
+__msg_unpriv("R6 has pointer with unsupported alu operation")
+__retval(0)
+__naked void deducing_bounds_from_const_4(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r0 = 0; \
+ if r0 s<= 0 goto l0_%=; \
+ exit; \
+l0_%=: if r0 s>= 0 goto l1_%=; \
+ exit; \
+l1_%=: r6 -= r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from const, 5")
+__failure __msg("R0 tried to subtract pointer from scalar")
+__msg_unpriv("R1 has pointer with unsupported alu operation")
+__naked void deducing_bounds_from_const_5(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ if r0 s>= 1 goto l0_%=; \
+ r0 -= r1; \
+l0_%=: exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from const, 6")
+__failure __msg("R0 tried to subtract pointer from scalar")
+__msg_unpriv("R1 has pointer with unsupported alu operation")
+__naked void deducing_bounds_from_const_6(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ if r0 s>= 0 goto l0_%=; \
+ exit; \
+l0_%=: r0 -= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from const, 7")
+__failure __msg("dereference of modified ctx ptr")
+__msg_unpriv("R1 has pointer with unsupported alu operation")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void deducing_bounds_from_const_7(void)
+{
+ asm volatile (" \
+ r0 = %[__imm_0]; \
+ if r0 s>= 0 goto l0_%=; \
+l0_%=: r1 -= r0; \
+ r0 = *(u32*)(r1 + %[__sk_buff_mark]); \
+ exit; \
+" :
+ : __imm_const(__imm_0, ~0),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from const, 8")
+__failure __msg("negative offset ctx ptr R1 off=-1 disallowed")
+__msg_unpriv("R1 has pointer with unsupported alu operation")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void deducing_bounds_from_const_8(void)
+{
+ asm volatile (" \
+ r0 = %[__imm_0]; \
+ if r0 s>= 0 goto l0_%=; \
+ r1 += r0; \
+l0_%=: r0 = *(u32*)(r1 + %[__sk_buff_mark]); \
+ exit; \
+" :
+ : __imm_const(__imm_0, ~0),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from const, 9")
+__failure __msg("R0 tried to subtract pointer from scalar")
+__msg_unpriv("R1 has pointer with unsupported alu operation")
+__naked void deducing_bounds_from_const_9(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ if r0 s>= 0 goto l0_%=; \
+l0_%=: r0 -= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from const, 10")
+__failure
+__msg("math between ctx pointer and register with unbounded min value is not allowed")
+__failure_unpriv
+__naked void deducing_bounds_from_const_10(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r0 = 0; \
+ if r0 s<= 0 goto l0_%=; \
+l0_%=: /* Marks r0 as unknown. */ \
+ call %[bpf_get_prandom_u32]; \
+ r0 -= r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c
new file mode 100644
index 000000000000..823f727cf210
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
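+/* All tests in this file follow the same pattern: r0 (the return value of
+ * bpf_ktime_get_ns) is compared against constants so that exactly one branch
+ * can never be taken, and that dead branch leads to "r0 -= r1;", which the
+ * verifier would reject if it considered the branch reachable.  The programs
+ * therefore only load when bounds are correctly deduced from the comparison
+ * between the non-constant and the constant register.
+ */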
+SEC("socket")
+__description("check deducing bounds from non-const, jmp64, <non_const> == <const>, 1")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_1(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 < 3 goto l0_%=; \
+ r2 = 2; \
+ if r0 == r2 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp64, <non_const> == <const>, 2")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_2(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 > 3 goto l0_%=; \
+ r2 = 4; \
+ if r0 == r2 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp64, <non_const> != <const>, 1")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_3(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 < 3 goto l0_%=; \
+ r2 = 2; \
+ if r0 != r2 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp64, <non_const> != <const>, 2")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_4(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 > 3 goto l0_%=; \
+ r2 = 4; \
+ if r0 != r2 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp32, <non_const> == <const>, 1")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_5(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if w0 < 4 goto l0_%=; \
+ w2 = 3; \
+ if w0 == w2 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp32, <non_const> == <const>, 2")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_6(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if w0 > 4 goto l0_%=; \
+ w2 = 5; \
+ if w0 == w2 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp32, <non_const> != <const>, 1")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_7(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if w0 < 3 goto l0_%=; \
+ w2 = 2; \
+ if w0 != w2 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp32, <non_const> != <const>, 2")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_8(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if w0 > 3 goto l0_%=; \
+ w2 = 4; \
+ if w0 != w2 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp64, <const> > <non_const>, 1")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_9(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ r2 = 0; \
+ if r2 > r0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp64, <const> > <non_const>, 2")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_10(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 < 4 goto l0_%=; \
+ r2 = 4; \
+ if r2 > r0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp64, <const> >= <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_11(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 < 4 goto l0_%=; \
+ r2 = 3; \
+ if r2 >= r0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp64, <const> < <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_12(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 > 4 goto l0_%=; \
+ r2 = 4; \
+ if r2 < r0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp64, <const> <= <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_13(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 >= 4 goto l0_%=; \
+ r2 = 4; \
+ if r2 <= r0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp64, <const> == <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_14(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 < 3 goto l0_%=; \
+ r2 = 2; \
+ if r2 == r0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp64, <const> s> <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_15(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 s< 4 goto l0_%=; \
+ r2 = 4; \
+ if r2 s> r0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp64, <const> s>= <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_16(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 s< 4 goto l0_%=; \
+ r2 = 3; \
+ if r2 s>= r0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp64, <const> s< <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_17(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 s> 4 goto l0_%=; \
+ r2 = 4; \
+ if r2 s< r0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp64, <const> s<= <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_18(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 s> 4 goto l0_%=; \
+ r2 = 5; \
+ if r2 s<= r0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp64, <const> != <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_19(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 < 3 goto l0_%=; \
+ r2 = 2; \
+ if r2 != r0 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp32, <const> > <non_const>, 1")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_20(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ w2 = 0; \
+ if w2 > w0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp32, <const> > <non_const>, 2")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_21(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if w0 < 4 goto l0_%=; \
+ w2 = 4; \
+ if w2 > w0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp32, <const> >= <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_22(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if w0 < 4 goto l0_%=; \
+ w2 = 3; \
+ if w2 >= w0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp32, <const> < <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_23(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if w0 > 4 goto l0_%=; \
+ w2 = 4; \
+ if w2 < w0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp32, <const> <= <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_24(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if w0 >= 4 goto l0_%=; \
+ w2 = 4; \
+ if w2 <= w0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp32, <const> == <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_25(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if w0 < 4 goto l0_%=; \
+ w2 = 3; \
+ if w2 == w0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp32, <const> s> <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_26(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if w0 s< 4 goto l0_%=; \
+ w2 = 4; \
+ if w2 s> w0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp32, <const> s>= <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_27(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if w0 s< 4 goto l0_%=; \
+ w2 = 3; \
+ if w2 s>= w0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp32, <const> s< <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_28(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if w0 s> 4 goto l0_%=; \
+ w2 = 5; \
+ if w2 s< w0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp32, <const> s<= <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_29(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if w0 s>= 4 goto l0_%=; \
+ w2 = 4; \
+ if w2 s<= w0 goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check deducing bounds from non-const, jmp32, <const> != <non_const>")
+__success __retval(0)
+__naked void deducing_bounds_from_non_const_30(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if w0 < 3 goto l0_%=; \
+ w2 = 2; \
+ if w2 != w0 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: \
+ r0 = 0; \
+ exit; \
+l1_%=: \
+ r0 -= r1; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c b/tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c
new file mode 100644
index 000000000000..4f40144748a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
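+/* The tests below feed an unknown value from bpf_ktime_get_ns through a mix
+ * of unsigned and signed comparisons before using it as an offset into the
+ * map value (or, in variant 6, as a helper size argument).  A signed upper
+ * bound alone leaves the minimum unbounded, since the value may still be
+ * negative, so most variants must be rejected; only the variants that also
+ * establish a non-negative lower bound (an AND mask, or an unsigned check
+ * that rules out negative values) are accepted.
+ */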
+SEC("socket")
+__description("bounds checks mixing signed and unsigned, positive bounds")
+__failure __msg("unbounded min value")
+__failure_unpriv
+__naked void signed_and_unsigned_positive_bounds(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r10 - 16); \
+ r2 = 2; \
+ if r2 >= r1 goto l0_%=; \
+ if r1 s> 4 goto l0_%=; \
+ r0 += r1; \
+ r1 = 0; \
+ *(u8*)(r0 + 0) = r1; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks mixing signed and unsigned")
+__failure __msg("unbounded min value")
+__failure_unpriv
+__naked void checks_mixing_signed_and_unsigned(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r10 - 16); \
+ r2 = -1; \
+ if r1 > r2 goto l0_%=; \
+ if r1 s> 1 goto l0_%=; \
+ r0 += r1; \
+ r1 = 0; \
+ *(u8*)(r0 + 0) = r1; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks mixing signed and unsigned, variant 2")
+__failure __msg("unbounded min value")
+__failure_unpriv
+__naked void signed_and_unsigned_variant_2(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r10 - 16); \
+ r2 = -1; \
+ if r1 > r2 goto l0_%=; \
+ r8 = 0; \
+ r8 += r1; \
+ if r8 s> 1 goto l0_%=; \
+ r0 += r8; \
+ r0 = 0; \
+ *(u8*)(r8 + 0) = r0; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks mixing signed and unsigned, variant 3")
+__failure __msg("unbounded min value")
+__failure_unpriv
+__naked void signed_and_unsigned_variant_3(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r10 - 16); \
+ r2 = -1; \
+ if r1 > r2 goto l0_%=; \
+ r8 = r1; \
+ if r8 s> 1 goto l0_%=; \
+ r0 += r8; \
+ r0 = 0; \
+ *(u8*)(r8 + 0) = r0; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks mixing signed and unsigned, variant 4")
+__success __success_unpriv __retval(0)
+__naked void signed_and_unsigned_variant_4(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r10 - 16); \
+ r2 = 1; \
+ r1 &= r2; \
+ if r1 s> 1 goto l0_%=; \
+ r0 += r1; \
+ r1 = 0; \
+ *(u8*)(r0 + 0) = r1; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks mixing signed and unsigned, variant 5")
+__failure __msg("unbounded min value")
+__failure_unpriv
+__naked void signed_and_unsigned_variant_5(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r10 - 16); \
+ r2 = -1; \
+ if r1 > r2 goto l0_%=; \
+ if r1 s> 1 goto l0_%=; \
+ r0 += 4; \
+ r0 -= r1; \
+ r1 = 0; \
+ *(u8*)(r0 + 0) = r1; \
+ r0 = 0; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks mixing signed and unsigned, variant 6")
+__failure __msg("R4 min value is negative, either use unsigned")
+__failure_unpriv
+__naked void signed_and_unsigned_variant_6(void)
+{
+ asm volatile (" \
+ r9 = r1; \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = r9; \
+ r2 = 0; \
+ r3 = r10; \
+ r3 += -512; \
+ r4 = *(u64*)(r10 - 16); \
+ r6 = -1; \
+ if r4 > r6 goto l0_%=; \
+ if r4 s> 1 goto l0_%=; \
+ r4 += 1; \
+ r5 = 0; \
+ r6 = 0; \
+ *(u16*)(r10 - 512) = r6; \
+ call %[bpf_skb_load_bytes]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_skb_load_bytes)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks mixing signed and unsigned, variant 7")
+__success __success_unpriv __retval(0)
+__naked void signed_and_unsigned_variant_7(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r10 - 16); \
+ r2 = %[__imm_0]; \
+ if r1 > r2 goto l0_%=; \
+ if r1 s> 1 goto l0_%=; \
+ r0 += r1; \
+ r1 = 0; \
+ *(u8*)(r0 + 0) = r1; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(__imm_0, 1024 * 1024 * 1024)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks mixing signed and unsigned, variant 8")
+__failure __msg("unbounded min value")
+__failure_unpriv
+__naked void signed_and_unsigned_variant_8(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r10 - 16); \
+ r2 = -1; \
+ if r2 > r1 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: if r1 s> 1 goto l0_%=; \
+ r0 += r1; \
+ r1 = 0; \
+ *(u8*)(r0 + 0) = r1; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks mixing signed and unsigned, variant 9")
+__success __success_unpriv __retval(0)
+__naked void signed_and_unsigned_variant_9(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r10 - 16); \
+ r2 = -9223372036854775808ULL ll; \
+ if r2 > r1 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: if r1 s> 1 goto l0_%=; \
+ r0 += r1; \
+ r1 = 0; \
+ *(u8*)(r0 + 0) = r1; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks mixing signed and unsigned, variant 10")
+__failure __msg("unbounded min value")
+__failure_unpriv
+__naked void signed_and_unsigned_variant_10(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r10 - 16); \
+ r2 = -1; \
+ if r2 > r1 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: if r1 s> 1 goto l0_%=; \
+ r0 += r1; \
+ r1 = 0; \
+ *(u8*)(r0 + 0) = r1; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks mixing signed and unsigned, variant 11")
+__failure __msg("unbounded min value")
+__failure_unpriv
+__naked void signed_and_unsigned_variant_11(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r10 - 16); \
+ r2 = -1; \
+ if r2 >= r1 goto l1_%=; \
+ /* Dead branch. */ \
+ r0 = 0; \
+ exit; \
+l1_%=: if r1 s> 1 goto l0_%=; \
+ r0 += r1; \
+ r1 = 0; \
+ *(u8*)(r0 + 0) = r1; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks mixing signed and unsigned, variant 12")
+__failure __msg("unbounded min value")
+__failure_unpriv
+__naked void signed_and_unsigned_variant_12(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r10 - 16); \
+ r2 = -6; \
+ if r2 >= r1 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: if r1 s> 1 goto l0_%=; \
+ r0 += r1; \
+ r1 = 0; \
+ *(u8*)(r0 + 0) = r1; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks mixing signed and unsigned, variant 13")
+__failure __msg("unbounded min value")
+__failure_unpriv
+__naked void signed_and_unsigned_variant_13(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r10 - 16); \
+ r2 = 2; \
+ if r2 >= r1 goto l0_%=; \
+ r7 = 1; \
+ if r7 s> 0 goto l1_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+l1_%=: r7 += r1; \
+ if r7 s> 4 goto l2_%=; \
+ r0 += r7; \
+ r1 = 0; \
+ *(u8*)(r0 + 0) = r1; \
+l2_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks mixing signed and unsigned, variant 14")
+__failure __msg("unbounded min value")
+__failure_unpriv
+__naked void signed_and_unsigned_variant_14(void)
+{
+ asm volatile (" \
+ r9 = *(u32*)(r1 + %[__sk_buff_mark]); \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r10 - 16); \
+ r2 = -1; \
+ r8 = 2; \
+ if r9 == 42 goto l1_%=; \
+ if r8 s> r1 goto l2_%=; \
+l3_%=: if r1 s> 1 goto l2_%=; \
+ r0 += r1; \
+l0_%=: r1 = 0; \
+ *(u8*)(r0 + 0) = r1; \
+l2_%=: r0 = 0; \
+ exit; \
+l1_%=: if r1 > r2 goto l2_%=; \
+ goto l3_%=; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bounds checks mixing signed and unsigned, variant 15")
+__failure __msg("unbounded min value")
+__failure_unpriv
+__naked void signed_and_unsigned_variant_15(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r10 - 16); \
+ r2 = -6; \
+ if r2 >= r1 goto l1_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+l1_%=: r0 += r1; \
+ if r0 > 1 goto l2_%=; \
+ r0 = 0; \
+ exit; \
+l2_%=: r1 = 0; \
+ *(u8*)(r0 + 0) = r1; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
new file mode 100644
index 000000000000..c258b0722e04
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
@@ -0,0 +1,883 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4) __msg("stack depth 8")
+__xlated("4: r5 = 5")
+__xlated("5: r0 = ")
+__xlated("6: r0 = &(void __percpu *)(r0)")
+__xlated("7: r0 = *(u32 *)(r0 +0)")
+__xlated("8: exit")
+__success
+__naked void simple(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "r2 = 2;"
+ "r3 = 3;"
+ "r4 = 4;"
+ "r5 = 5;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "*(u64 *)(r10 - 24) = r2;"
+ "*(u64 *)(r10 - 32) = r3;"
+ "*(u64 *)(r10 - 40) = r4;"
+ "*(u64 *)(r10 - 48) = r5;"
+ "call %[bpf_get_smp_processor_id];"
+ "r5 = *(u64 *)(r10 - 48);"
+ "r4 = *(u64 *)(r10 - 40);"
+ "r3 = *(u64 *)(r10 - 32);"
+ "r2 = *(u64 *)(r10 - 24);"
+ "r1 = *(u64 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
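+/* A rough sketch of the bpf_fastcall pattern exercised in this file: a spill
+ * of a caller-saved register (r1-r5) to the stack right before a helper call,
+ * with a matching fill of the same register right after it. When the pattern
+ * is valid, the spill/fill pair is removed, e.g. for the test above on x86
+ * (based on the __xlated expectations):
+ *
+ *   *(u64 *)(r10 - 16) = r1;         // removed
+ *   call bpf_get_smp_processor_id;   // expanded by the verifier into:
+ *                                    //   r0 = <per-cpu variable address>
+ *                                    //   r0 = &(void __percpu *)(r0)
+ *                                    //   r0 = *(u32 *)(r0 + 0)
+ *   r1 = *(u64 *)(r10 - 16);         // removed
+ */
+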
+/* The logic for detecting and verifying the bpf_fastcall pattern is the same
+ * for any arch, however x86 differs from arm64 and riscv64 in the way
+ * bpf_get_smp_processor_id is rewritten:
+ * - on x86 it is done by the verifier
+ * - on arm64 and riscv64 it is done by the jit
+ *
+ * Which leads to different xlated patterns for different archs:
+ * - on x86 the call is expanded into 3 instructions
+ * - on arm64 and riscv64 the call remains as is
+ *   (but spills/fills are still removed)
+ *
+ * It is desirable to check instruction indexes in the xlated patterns,
+ * so add this canary test to check that a function rewritten by the jit
+ * is correctly processed by the bpf_fastcall logic, and keep the rest of
+ * the tests x86-specific.
+ */
+SEC("raw_tp")
+__arch_arm64
+__arch_riscv64
+__xlated("0: r1 = 1")
+__xlated("1: call bpf_get_smp_processor_id")
+__xlated("2: exit")
+__success
+__naked void canary_arm64_riscv64(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("3: exit")
+__success
+__naked void canary_zero_spills(void)
+{
+ asm volatile (
+ "call %[bpf_get_smp_processor_id];"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4) __msg("stack depth 16")
+__xlated("1: *(u64 *)(r10 -16) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r2 = *(u64 *)(r10 -16)")
+__success
+__naked void wrong_reg_in_pattern1(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r2 = *(u64 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u64 *)(r10 -16) = r6")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r6 = *(u64 *)(r10 -16)")
+__success
+__naked void wrong_reg_in_pattern2(void)
+{
+ asm volatile (
+ "r6 = 1;"
+ "*(u64 *)(r10 - 16) = r6;"
+ "call %[bpf_get_smp_processor_id];"
+ "r6 = *(u64 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u64 *)(r10 -16) = r0")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r0 = *(u64 *)(r10 -16)")
+__success
+__naked void wrong_reg_in_pattern3(void)
+{
+ asm volatile (
+ "r0 = 1;"
+ "*(u64 *)(r10 - 16) = r0;"
+ "call %[bpf_get_smp_processor_id];"
+ "r0 = *(u64 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("2: *(u64 *)(r2 -16) = r1")
+__xlated("...")
+__xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("6: r1 = *(u64 *)(r10 -16)")
+__success
+__naked void wrong_base_in_pattern(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "r2 = r10;"
+ "*(u64 *)(r2 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u64 *)(r10 -16) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r2 = 1")
+__success
+__naked void wrong_insn_in_pattern(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r2 = 1;"
+ "r1 = *(u64 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("2: *(u64 *)(r10 -16) = r1")
+__xlated("...")
+__xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("6: r1 = *(u64 *)(r10 -8)")
+__success
+__naked void wrong_off_in_pattern1(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u32 *)(r10 -4) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r1 = *(u32 *)(r10 -4)")
+__success
+__naked void wrong_off_in_pattern2(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u32 *)(r10 - 4) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u32 *)(r10 - 4);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u32 *)(r10 -16) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r1 = *(u32 *)(r10 -16)")
+__success
+__naked void wrong_size_in_pattern(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u32 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u32 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("2: *(u32 *)(r10 -8) = r1")
+__xlated("...")
+__xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("6: r1 = *(u32 *)(r10 -8)")
+__success
+__naked void partial_pattern(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "r2 = 2;"
+ "*(u32 *)(r10 - 8) = r1;"
+ "*(u64 *)(r10 - 16) = r2;"
+ "call %[bpf_get_smp_processor_id];"
+ "r2 = *(u64 *)(r10 - 16);"
+ "r1 = *(u32 *)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("0: r1 = 1")
+__xlated("1: r2 = 2")
+/* not patched, spills for -8, -16 not removed */
+__xlated("2: *(u64 *)(r10 -8) = r1")
+__xlated("3: *(u64 *)(r10 -16) = r2")
+__xlated("...")
+__xlated("5: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("7: r2 = *(u64 *)(r10 -16)")
+__xlated("8: r1 = *(u64 *)(r10 -8)")
+/* patched, spills for -24, -32 removed */
+__xlated("...")
+__xlated("10: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("12: exit")
+__success
+__naked void min_stack_offset(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "r2 = 2;"
+ /* this call won't be patched */
+ "*(u64 *)(r10 - 8) = r1;"
+ "*(u64 *)(r10 - 16) = r2;"
+ "call %[bpf_get_smp_processor_id];"
+ "r2 = *(u64 *)(r10 - 16);"
+ "r1 = *(u64 *)(r10 - 8);"
+	/* this call will be patched */
+ "*(u64 *)(r10 - 24) = r1;"
+ "*(u64 *)(r10 - 32) = r2;"
+ "call %[bpf_get_smp_processor_id];"
+ "r2 = *(u64 *)(r10 - 32);"
+ "r1 = *(u64 *)(r10 - 24);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r1 = *(u64 *)(r10 -8)")
+__success
+__naked void bad_fixed_read(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r1 = *(u64 *)(r1 - 0);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r1 = *(u64 *)(r10 -8)")
+__success
+__naked void bad_fixed_write(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "r1 = r10;"
+ "r1 += -8;"
+ "*(u64 *)(r1 - 0) = r1;"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("6: *(u64 *)(r10 -16) = r1")
+__xlated("...")
+__xlated("8: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("10: r1 = *(u64 *)(r10 -16)")
+__success
+__naked void bad_varying_read(void)
+{
+ asm volatile (
+ "r6 = *(u64 *)(r1 + 0);" /* random scalar value */
+ "r6 &= 0x7;" /* r6 range [0..7] */
+ "r6 += 0x2;" /* r6 range [2..9] */
+ "r7 = 0;"
+ "r7 -= r6;" /* r7 range [-9..-2] */
+ "r1 = 1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "r1 = r10;"
+ "r1 += r7;"
+ "r1 = *(u8 *)(r1 - 0);" /* touches slot [-16..-9] where spills are stored */
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("6: *(u64 *)(r10 -16) = r1")
+__xlated("...")
+__xlated("8: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("10: r1 = *(u64 *)(r10 -16)")
+__success
+__naked void bad_varying_write(void)
+{
+ asm volatile (
+ "r6 = *(u64 *)(r1 + 0);" /* random scalar value */
+ "r6 &= 0x7;" /* r6 range [0..7] */
+ "r6 += 0x2;" /* r6 range [2..9] */
+ "r7 = 0;"
+ "r7 -= r6;" /* r7 range [-9..-2] */
+ "r1 = 1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "r1 = r10;"
+ "r1 += r7;"
+ "*(u8 *)(r1 - 0) = r7;" /* touches slot [-16..-9] where spills are stored */
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r1 = *(u64 *)(r10 -8)")
+__success
+__naked void bad_write_in_subprog(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call bad_write_in_subprog_aux;"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+__used
+__naked static void bad_write_in_subprog_aux(void)
+{
+ asm volatile (
+ "r0 = 1;"
+ "*(u64 *)(r1 - 0) = r0;" /* invalidates bpf_fastcall contract for caller: */
+ "exit;" /* caller stack at -8 used outside of the pattern */
+ ::: __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r1 = *(u64 *)(r10 -8)")
+__success
+__naked void bad_helper_write(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ /* bpf_fastcall pattern with stack offset -8 */
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 1;"
+ "r3 = 42;"
+ /* read dst is fp[-8], thus bpf_fastcall rewrite not applied */
+ "call %[bpf_probe_read_kernel];"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id),
+ __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+/* main, not patched */
+__xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r1 = *(u64 *)(r10 -8)")
+__xlated("...")
+__xlated("9: call pc+1")
+__xlated("...")
+__xlated("10: exit")
+/* subprogram, patched */
+__xlated("11: r1 = 1")
+__xlated("...")
+__xlated("13: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("15: exit")
+__success
+__naked void invalidate_one_subprog(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r1 = *(u64 *)(r1 - 0);"
+ "call invalidate_one_subprog_aux;"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+__used
+__naked static void invalidate_one_subprog_aux(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+/* main */
+__xlated("0: r1 = 1")
+__xlated("...")
+__xlated("2: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("4: call pc+1")
+__xlated("5: exit")
+/* subprogram */
+__xlated("6: r1 = 1")
+__xlated("...")
+__xlated("8: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("10: *(u64 *)(r10 -16) = r1")
+__xlated("11: exit")
+__success
+__naked void subprogs_use_independent_offsets(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "call subprogs_use_independent_offsets_aux;"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+__used
+__naked static void subprogs_use_independent_offsets_aux(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 24) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 24);"
+ "*(u64 *)(r10 - 16) = r1;"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4) __msg("stack depth 8")
+__xlated("2: r0 = &(void __percpu *)(r0)")
+__success
+__naked void helper_call_does_not_prevent_bpf_fastcall(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_prandom_u32];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4) __msg("stack depth 24")
+/* may_goto counter at -24 */
+__xlated("0: *(u64 *)(r10 -24) =")
+/* may_goto timestamp at -16 */
+__xlated("1: *(u64 *)(r10 -16) =")
+__xlated("2: r1 = 1")
+__xlated("...")
+__xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+/* may_goto expansion starts */
+__xlated("6: r11 = *(u64 *)(r10 -24)")
+__xlated("7: if r11 == 0x0 goto pc+6")
+__xlated("8: r11 -= 1")
+__xlated("9: if r11 != 0x0 goto pc+2")
+__xlated("10: r11 = -24")
+__xlated("11: call unknown")
+__xlated("12: *(u64 *)(r10 -24) = r11")
+/* may_goto expansion ends */
+__xlated("13: *(u64 *)(r10 -8) = r1")
+__xlated("14: exit")
+__success
+__naked void may_goto_interaction_x86_64(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ ".8byte %[may_goto];"
+ /* just touch some stack at -8 */
+ "*(u64 *)(r10 - 8) = r1;"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id),
+ __imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, +1 /* offset */, 0))
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_arm64
+__log_level(4) __msg("stack depth 16")
+/* may_goto counter at -16 */
+__xlated("0: *(u64 *)(r10 -16) =")
+__xlated("1: r1 = 1")
+__xlated("2: call bpf_get_smp_processor_id")
+/* may_goto expansion starts */
+__xlated("3: r11 = *(u64 *)(r10 -16)")
+__xlated("4: if r11 == 0x0 goto pc+3")
+__xlated("5: r11 -= 1")
+__xlated("6: *(u64 *)(r10 -16) = r11")
+/* may_goto expansion ends */
+__xlated("7: *(u64 *)(r10 -8) = r1")
+__xlated("8: exit")
+__success
+__naked void may_goto_interaction_arm64(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ ".8byte %[may_goto];"
+ /* just touch some stack at -8 */
+ "*(u64 *)(r10 - 8) = r1;"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id),
+ __imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, +1 /* offset */, 0))
+ : __clobber_all);
+}
+
+__used
+__naked static void dummy_loop_callback(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4) __msg("stack depth 32+0")
+__xlated("2: r1 = 1")
+__xlated("3: r0 =")
+__xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("5: r0 = *(u32 *)(r0 +0)")
+/* bpf_loop params setup */
+__xlated("6: r2 =")
+__xlated("7: r3 = 0")
+__xlated("8: r4 = 0")
+__xlated("...")
+/* ... part of the inlined bpf_loop */
+__xlated("12: *(u64 *)(r10 -32) = r6")
+__xlated("13: *(u64 *)(r10 -24) = r7")
+__xlated("14: *(u64 *)(r10 -16) = r8")
+__xlated("...")
+__xlated("21: call pc+8") /* dummy_loop_callback */
+/* ... last insns of the bpf_loop_interaction1 */
+__xlated("...")
+__xlated("28: r0 = 0")
+__xlated("29: exit")
+/* dummy_loop_callback */
+__xlated("30: r0 = 0")
+__xlated("31: exit")
+__success
+__naked int bpf_loop_interaction1(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ /* bpf_fastcall stack region at -16, but could be removed */
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "r2 = %[dummy_loop_callback];"
+ "r3 = 0;"
+ "r4 = 0;"
+ "call %[bpf_loop];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_ptr(dummy_loop_callback),
+ __imm(bpf_get_smp_processor_id),
+ __imm(bpf_loop)
+ : __clobber_common
+ );
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4) __msg("stack depth 40+0")
+/* call bpf_get_smp_processor_id */
+__xlated("2: r1 = 42")
+__xlated("3: r0 =")
+__xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("5: r0 = *(u32 *)(r0 +0)")
+/* call bpf_get_prandom_u32 */
+__xlated("6: *(u64 *)(r10 -16) = r1")
+__xlated("7: call")
+__xlated("8: r1 = *(u64 *)(r10 -16)")
+__xlated("...")
+/* ... part of the inlined bpf_loop */
+__xlated("15: *(u64 *)(r10 -40) = r6")
+__xlated("16: *(u64 *)(r10 -32) = r7")
+__xlated("17: *(u64 *)(r10 -24) = r8")
+__success
+__naked int bpf_loop_interaction2(void)
+{
+ asm volatile (
+ "r1 = 42;"
+ /* bpf_fastcall stack region at -16, cannot be removed */
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_prandom_u32];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "r2 = %[dummy_loop_callback];"
+ "r3 = 0;"
+ "r4 = 0;"
+ "call %[bpf_loop];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_ptr(dummy_loop_callback),
+ __imm(bpf_get_smp_processor_id),
+ __imm(bpf_get_prandom_u32),
+ __imm(bpf_loop)
+ : __clobber_common
+ );
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4)
+__msg("stack depth 512+0")
+/* just to print xlated version when debugging */
+__xlated("r0 = &(void __percpu *)(r0)")
+__success
+/* cumulative_stack_depth() uses MAX_BPF_STACK bytes of stack, and the
+ * called subprogram uses an additional slot for the bpf_fastcall spill/fill.
+ * Since the bpf_fastcall spill/fill can be removed, the program still fits
+ * within MAX_BPF_STACK and should be accepted.
+ */
+__naked int cumulative_stack_depth(void)
+{
+ asm volatile(
+ "r1 = 42;"
+ "*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
+ "call cumulative_stack_depth_subprog;"
+ "exit;"
+ :
+ : __imm_const(max_bpf_stack, MAX_BPF_STACK)
+ : __clobber_all
+ );
+}
+
+__used
+__naked static void cumulative_stack_depth_subprog(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "exit;"
+ :: __imm(bpf_get_smp_processor_id) : __clobber_all);
+}
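+
+/* Roughly: the main program above touches r10 - MAX_BPF_STACK (512 bytes),
+ * and the subprogram adds an 8-byte bpf_fastcall spill/fill slot. Counting
+ * that slot would exceed MAX_BPF_STACK, but since the spill/fill can be
+ * removed, the combined usage is reported as "stack depth 512+0" and the
+ * program is accepted.
+ */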
+
+SEC("cgroup/getsockname_unix")
+__xlated("0: r2 = 1")
+/* bpf_cast_to_kern_ctx is replaced by a single assignment */
+__xlated("1: r0 = r1")
+__xlated("2: r0 = r2")
+__xlated("3: exit")
+__success
+__naked void kfunc_bpf_cast_to_kern_ctx(void)
+{
+ asm volatile (
+ "r2 = 1;"
+ "*(u64 *)(r10 - 32) = r2;"
+ "call %[bpf_cast_to_kern_ctx];"
+ "r2 = *(u64 *)(r10 - 32);"
+ "r0 = r2;"
+ "exit;"
+ :
+ : __imm(bpf_cast_to_kern_ctx)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__xlated("3: r3 = 1")
+/* bpf_rdonly_cast is replaced by a single assignment */
+__xlated("4: r0 = r1")
+__xlated("5: r0 = r3")
+void kfunc_bpf_rdonly_cast(void)
+{
+ asm volatile (
+ "r2 = %[btf_id];"
+ "r3 = 1;"
+ "*(u64 *)(r10 - 32) = r3;"
+ "call %[bpf_rdonly_cast];"
+ "r3 = *(u64 *)(r10 - 32);"
+ "r0 = r3;"
+ :
+ : __imm(bpf_rdonly_cast),
+ [btf_id]"r"(bpf_core_type_id_kernel(union bpf_attr))
+ : __clobber_common);
+}
+
+/* BTF FUNC records are not generated for kfuncs referenced
+ * from inline assembly. These records are necessary for
+ * libbpf to link the program. The function below is a hack
+ * to ensure that BTF FUNC records are generated.
+ */
+void kfunc_root(void)
+{
+ bpf_cast_to_kern_ctx(0);
+ bpf_rdonly_cast(0, 0);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c b/tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c
new file mode 100644
index 000000000000..325a2bab4a71
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/bpf_get_stack.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct test_val);
+} map_array_48b SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct test_val);
+} map_hash_48b SEC(".maps");
+
+SEC("tracepoint")
+__description("bpf_get_stack return R0 within range")
+__success
+__naked void stack_return_r0_within_range(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r7 = r0; \
+ r9 = %[__imm_0]; \
+ r1 = r6; \
+ r2 = r7; \
+ r3 = %[__imm_0]; \
+ r4 = 256; \
+ call %[bpf_get_stack]; \
+ r1 = 0; \
+ r8 = r0; \
+ r8 <<= 32; \
+ r8 s>>= 32; \
+ if r1 s> r8 goto l0_%=; \
+ r9 -= r8; \
+ r2 = r7; \
+ r2 += r8; \
+ r1 = r9; \
+ r1 <<= 32; \
+ r1 s>>= 32; \
+ r3 = r2; \
+ r3 += r1; \
+ r1 = r7; \
+ r5 = %[__imm_0]; \
+ r1 += r5; \
+ if r3 >= r1 goto l0_%=; \
+ r1 = r6; \
+ r3 = r9; \
+ r4 = 0; \
+ call %[bpf_get_stack]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_get_stack),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(__imm_0, sizeof(struct test_val) / 2)
+ : __clobber_all);
+}
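+
+/* A rough C equivalent of the asm above (a sketch; names are illustrative):
+ *
+ *	long half = sizeof(struct test_val) / 2;
+ *	long ret = bpf_get_stack(ctx, buf, half, 256);
+ *	if ((int)ret < 0)
+ *		return;
+ *	long rem = half - (int)ret;
+ *	if (buf + (int)ret + (int)rem >= buf + half)
+ *		return;
+ *	bpf_get_stack(ctx, buf + (int)ret, rem, 0);
+ *
+ * The program only verifies if R0 returned by the first bpf_get_stack call
+ * is tracked as bounded above by 'half', so that the second call's buffer
+ * and size stay inside the 48-byte map value.
+ */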
+
+SEC("iter/task")
+__description("bpf_get_task_stack return R0 range is refined")
+__success
+__naked void return_r0_range_is_refined(void)
+{
+ asm volatile (" \
+ r6 = *(u64*)(r1 + 0); \
+ r6 = *(u64*)(r6 + 0); /* ctx->meta->seq */\
+ r7 = *(u64*)(r1 + 8); /* ctx->task */\
+ r1 = %[map_array_48b] ll; /* fixup_map_array_48b */\
+ r2 = 0; \
+ *(u64*)(r10 - 8) = r2; \
+ r2 = r10; \
+ r2 += -8; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: if r7 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r1 = r7; \
+ r2 = r0; \
+ r9 = r0; /* keep buf for seq_write */\
+ r3 = 48; \
+ r4 = 0; \
+ call %[bpf_get_task_stack]; \
+ if r0 s> 0 goto l2_%=; \
+ r0 = 0; \
+ exit; \
+l2_%=: r1 = r6; \
+ r2 = r9; \
+ r3 = r0; \
+ call %[bpf_seq_write]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_task_stack),
+ __imm(bpf_map_lookup_elem),
+ __imm(bpf_seq_write),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_trap.c b/tools/testing/selftests/bpf/progs/verifier_bpf_trap.c
new file mode 100644
index 000000000000..35e2cdc00a01
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_bpf_trap.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#if __clang_major__ >= 21 && 0
+SEC("socket")
+__description("__builtin_trap with simple c code")
+__failure __msg("unexpected __bpf_trap() due to uninitialized variable?")
+void bpf_builtin_trap_with_simple_c(void)
+{
+ __builtin_trap();
+}
+#endif
+
+SEC("socket")
+__description("__bpf_trap with simple c code")
+__failure __msg("unexpected __bpf_trap() due to uninitialized variable?")
+void bpf_trap_with_simple_c(void)
+{
+ __bpf_trap();
+}
+
+SEC("socket")
+__description("__bpf_trap as the second-from-last insn")
+__failure __msg("unexpected __bpf_trap() due to uninitialized variable?")
+__naked void bpf_trap_at_func_end(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "call %[__bpf_trap];"
+ "exit;"
+ :
+ : __imm(__bpf_trap)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("dead code __bpf_trap in the middle of code")
+__success
+__naked void dead_bpf_trap_in_middle(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "if r0 == 0 goto +1;"
+ "call %[__bpf_trap];"
+ "r0 = 2;"
+ "exit;"
+ :
+ : __imm(__bpf_trap)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("reachable __bpf_trap in the middle of code")
+__failure __msg("unexpected __bpf_trap() due to uninitialized variable?")
+__naked void live_bpf_trap_in_middle(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "if r0 == 1 goto +1;"
+ "call %[__bpf_trap];"
+ "r0 = 2;"
+ "exit;"
+ :
+ : __imm(__bpf_trap)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_bswap.c b/tools/testing/selftests/bpf/progs/verifier_bswap.c
new file mode 100644
index 000000000000..e61755656e8d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_bswap.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+ defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
+
+SEC("socket")
+__description("BSWAP, 16")
+__success __success_unpriv __retval(0x23ff)
+__naked void bswap_16(void)
+{
+ asm volatile (" \
+ r0 = 0xff23; \
+ r0 = bswap16 r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("BSWAP, 32")
+__success __success_unpriv __retval(0x23ff0000)
+__naked void bswap_32(void)
+{
+ asm volatile (" \
+ r0 = 0xff23; \
+ r0 = bswap32 r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("BSWAP, 64")
+__success __success_unpriv __retval(0x34ff12ff)
+__naked void bswap_64(void)
+{
+ asm volatile (" \
+ r0 = %[u64_val] ll; \
+ r0 = bswap64 r0; \
+ exit; \
+" :
+ : [u64_val]"i"(0xff12ff34ff56ff78ull)
+ : __clobber_all);
+}
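+
+/* The __retval() values above follow from plain byte reversal, e.g.
+ * bswap64(0xff12ff34ff56ff78) == 0x78ff56ff34ff12ff; the harness compares
+ * the 32-bit program return value, hence __retval(0x34ff12ff).
+ */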
+
+#else
+
+SEC("socket")
+__description("cpuv4 is not supported by compiler or jit, use a dummy test")
+__success
+int dummy_test(void)
+{
+ return 0;
+}
+
+#endif
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c
new file mode 100644
index 000000000000..03942cec07e5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/btf_ctx_access.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("fentry/bpf_modify_return_test")
+__description("btf_ctx_access accept")
+__success __retval(0)
+__naked void btf_ctx_access_accept(void)
+{
+ asm volatile (" \
+ r2 = *(u64 *)(r1 + 8); /* load 2nd argument value (int pointer) */\
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("fentry/bpf_fentry_test9")
+__description("btf_ctx_access u32 pointer accept")
+__success __retval(0)
+__naked void ctx_access_u32_pointer_accept(void)
+{
+ asm volatile (" \
+	r2 = *(u64 *)(r1 + 0);		/* load 1st argument value (u32 pointer) */\
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("fentry/bpf_fentry_test9")
+__description("btf_ctx_access u32 pointer reject u32")
+__failure __msg("size 4 must be 8")
+__naked void ctx_access_u32_pointer_reject_32(void)
+{
+ asm volatile (" \
+ r2 = *(u32 *)(r1 + 0); /* load 1st argument with narrow load */\
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("fentry/bpf_fentry_test9")
+__description("btf_ctx_access u32 pointer reject u16")
+__failure __msg("size 2 must be 8")
+__naked void ctx_access_u32_pointer_reject_16(void)
+{
+ asm volatile (" \
+ r2 = *(u16 *)(r1 + 0); /* load 1st argument with narrow load */\
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("fentry/bpf_fentry_test9")
+__description("btf_ctx_access u32 pointer reject u8")
+__failure __msg("size 1 must be 8")
+__naked void ctx_access_u32_pointer_reject_8(void)
+{
+ asm volatile (" \
+ r2 = *(u8 *)(r1 + 0); /* load 1st argument with narrow load */\
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("fentry/bpf_fentry_test10")
+__description("btf_ctx_access const void pointer accept")
+__success __retval(0)
+__naked void ctx_access_const_void_pointer_accept(void)
+{
+ asm volatile (" \
+ r2 = *(u64 *)(r1 + 0); /* load 1st argument value (const void pointer) */\
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_btf_unreliable_prog.c b/tools/testing/selftests/bpf/progs/verifier_btf_unreliable_prog.c
new file mode 100644
index 000000000000..36e033a2e02c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_btf_unreliable_prog.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+struct whatever {};
+
+SEC("kprobe")
+__success __log_level(2)
+/* context type is wrong, making it impossible to freplace this program */
+int btf_unreliable_kprobe(struct whatever *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_cfg.c b/tools/testing/selftests/bpf/progs/verifier_cfg.c
new file mode 100644
index 000000000000..c1f55e1d80a4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_cfg.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/cfg.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("unreachable")
+__failure __msg("unreachable")
+__failure_unpriv
+__naked void unreachable(void)
+{
+ asm volatile (" \
+ exit; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unreachable2")
+__failure __msg("unreachable")
+__failure_unpriv
+__naked void unreachable2(void)
+{
+ asm volatile (" \
+ goto l0_%=; \
+ goto l0_%=; \
+l0_%=: exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("out of range jump")
+__failure __msg("jump out of range")
+__failure_unpriv
+__naked void out_of_range_jump(void)
+{
+ asm volatile (" \
+ goto l0_%=; \
+ exit; \
+l0_%=: \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("out of range jump2")
+__failure __msg("jump out of range")
+__failure_unpriv
+__naked void out_of_range_jump2(void)
+{
+ asm volatile (" \
+ goto -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("loop (back-edge)")
+__failure __msg("unreachable insn 1")
+__msg_unpriv("back-edge")
+__naked void loop_back_edge(void)
+{
+ asm volatile (" \
+l0_%=: goto l0_%=; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("loop2 (back-edge)")
+__failure __msg("unreachable insn 4")
+__msg_unpriv("back-edge")
+__naked void loop2_back_edge(void)
+{
+ asm volatile (" \
+l0_%=: r1 = r0; \
+ r2 = r0; \
+ r3 = r0; \
+ goto l0_%=; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("conditional loop")
+__failure __msg("infinite loop detected")
+__msg_unpriv("back-edge")
+__naked void conditional_loop(void)
+{
+ asm volatile (" \
+ r0 = r1; \
+l0_%=: r2 = r0; \
+ r3 = r0; \
+ if r1 == 0 goto l0_%=; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("conditional loop (2)")
+__success
+__failure_unpriv __msg_unpriv("back-edge from insn 10 to 11")
+__naked void conditional_loop2(void)
+{
+ asm volatile (" \
+ r9 = 2 ll; \
+ r3 = 0x20 ll; \
+ r4 = 0x35 ll; \
+ r8 = r4; \
+ goto l1_%=; \
+l0_%=: r9 -= r3; \
+ r9 -= r4; \
+ r9 -= r8; \
+l1_%=: r8 += r4; \
+ if r8 < 0x64 goto l0_%=; \
+ r0 = r9; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unconditional loop after conditional jump")
+__failure __msg("infinite loop detected")
+__failure_unpriv __msg_unpriv("back-edge from insn 3 to 2")
+__naked void uncond_loop_after_cond_jmp(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ if r0 > 0 goto l1_%=; \
+l0_%=: r0 = 1; \
+ goto l0_%=; \
+l1_%=: exit; \
+" ::: __clobber_all);
+}
+
+__naked __noinline __used
+static unsigned long never_ending_subprog()
+{
+ asm volatile (" \
+ r0 = r1; \
+ goto -1; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unconditional loop after conditional jump")
+/* infinite loop is detected *after* check_cfg() */
+__failure __msg("infinite loop detected")
+__naked void uncond_loop_in_subprog_after_cond_jmp(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ if r0 > 0 goto l1_%=; \
+l0_%=: r0 += 1; \
+ call never_ending_subprog; \
+l1_%=: exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c b/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c
new file mode 100644
index 000000000000..6e0f349f8f15
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("cgroup/sock")
+__description("bpf_exit with invalid return code. test1")
+__failure __msg("smin=0 smax=4294967295 should have been in [0, 1]")
+__naked void with_invalid_return_code_test1(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + 0); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("cgroup/sock")
+__description("bpf_exit with invalid return code. test2")
+__success
+__naked void with_invalid_return_code_test2(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + 0); \
+ r0 &= 1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("cgroup/sock")
+__description("bpf_exit with invalid return code. test3")
+__failure __msg("smin=0 smax=3 should have been in [0, 1]")
+__naked void with_invalid_return_code_test3(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + 0); \
+ r0 &= 3; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("cgroup/sock")
+__description("bpf_exit with invalid return code. test4")
+__success
+__naked void with_invalid_return_code_test4(void)
+{
+ asm volatile (" \
+ r0 = 1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("cgroup/sock")
+__description("bpf_exit with invalid return code. test5")
+__failure __msg("smin=2 smax=2 should have been in [0, 1]")
+__naked void with_invalid_return_code_test5(void)
+{
+ asm volatile (" \
+ r0 = 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("cgroup/sock")
+__description("bpf_exit with invalid return code. test6")
+__failure __msg("R0 is not a known value (ctx)")
+__naked void with_invalid_return_code_test6(void)
+{
+ asm volatile (" \
+ r0 = r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("cgroup/sock")
+__description("bpf_exit with invalid return code. test7")
+__failure __msg("R0 has unknown scalar value should have been in [0, 1]")
+__naked void with_invalid_return_code_test7(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + 0); \
+ r2 = *(u32*)(r1 + 4); \
+ r0 *= r2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c b/tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c
new file mode 100644
index 000000000000..5ee3d349d6d0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/cgroup_skb.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("cgroup/skb")
+__description("direct packet read test#1 for CGROUP_SKB")
+__success __failure_unpriv
+__msg_unpriv("invalid bpf_context access off=76 size=4")
+__retval(0)
+__naked void test_1_for_cgroup_skb(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r4 = *(u32*)(r1 + %[__sk_buff_len]); \
+ r5 = *(u32*)(r1 + %[__sk_buff_pkt_type]); \
+ r6 = *(u32*)(r1 + %[__sk_buff_mark]); \
+ *(u32*)(r1 + %[__sk_buff_mark]) = r6; \
+ r7 = *(u32*)(r1 + %[__sk_buff_queue_mapping]); \
+ r8 = *(u32*)(r1 + %[__sk_buff_protocol]); \
+ r9 = *(u32*)(r1 + %[__sk_buff_vlan_present]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
+ __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
+ __imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)),
+ __imm_const(__sk_buff_protocol, offsetof(struct __sk_buff, protocol)),
+ __imm_const(__sk_buff_queue_mapping, offsetof(struct __sk_buff, queue_mapping)),
+ __imm_const(__sk_buff_vlan_present, offsetof(struct __sk_buff, vlan_present))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("direct packet read test#2 for CGROUP_SKB")
+__success __success_unpriv __retval(0)
+__naked void test_2_for_cgroup_skb(void)
+{
+ asm volatile (" \
+ r4 = *(u32*)(r1 + %[__sk_buff_vlan_tci]); \
+ r5 = *(u32*)(r1 + %[__sk_buff_vlan_proto]); \
+ r6 = *(u32*)(r1 + %[__sk_buff_priority]); \
+ *(u32*)(r1 + %[__sk_buff_priority]) = r6; \
+ r7 = *(u32*)(r1 + %[__sk_buff_ingress_ifindex]);\
+ r8 = *(u32*)(r1 + %[__sk_buff_tc_index]); \
+ r9 = *(u32*)(r1 + %[__sk_buff_hash]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_hash, offsetof(struct __sk_buff, hash)),
+ __imm_const(__sk_buff_ingress_ifindex, offsetof(struct __sk_buff, ingress_ifindex)),
+ __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority)),
+ __imm_const(__sk_buff_tc_index, offsetof(struct __sk_buff, tc_index)),
+ __imm_const(__sk_buff_vlan_proto, offsetof(struct __sk_buff, vlan_proto)),
+ __imm_const(__sk_buff_vlan_tci, offsetof(struct __sk_buff, vlan_tci))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("direct packet read test#3 for CGROUP_SKB")
+__success __success_unpriv __retval(0)
+__naked void test_3_for_cgroup_skb(void)
+{
+ asm volatile (" \
+ r4 = *(u32*)(r1 + %[__sk_buff_cb_0]); \
+ r5 = *(u32*)(r1 + %[__sk_buff_cb_1]); \
+ r6 = *(u32*)(r1 + %[__sk_buff_cb_2]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_cb_3]); \
+ r8 = *(u32*)(r1 + %[__sk_buff_cb_4]); \
+ r9 = *(u32*)(r1 + %[__sk_buff_napi_id]); \
+ *(u32*)(r1 + %[__sk_buff_cb_0]) = r4; \
+ *(u32*)(r1 + %[__sk_buff_cb_1]) = r5; \
+ *(u32*)(r1 + %[__sk_buff_cb_2]) = r6; \
+ *(u32*)(r1 + %[__sk_buff_cb_3]) = r7; \
+ *(u32*)(r1 + %[__sk_buff_cb_4]) = r8; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])),
+ __imm_const(__sk_buff_cb_1, offsetof(struct __sk_buff, cb[1])),
+ __imm_const(__sk_buff_cb_2, offsetof(struct __sk_buff, cb[2])),
+ __imm_const(__sk_buff_cb_3, offsetof(struct __sk_buff, cb[3])),
+ __imm_const(__sk_buff_cb_4, offsetof(struct __sk_buff, cb[4])),
+ __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("direct packet read test#4 for CGROUP_SKB")
+__success __success_unpriv __retval(0)
+__naked void test_4_for_cgroup_skb(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_family]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_remote_ip4]); \
+ r4 = *(u32*)(r1 + %[__sk_buff_local_ip4]); \
+ r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_0]); \
+ r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_1]); \
+ r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_2]); \
+ r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_3]); \
+ r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_0]); \
+ r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_1]); \
+ r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_2]); \
+ r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_3]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_remote_port]); \
+ r8 = *(u32*)(r1 + %[__sk_buff_local_port]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_family, offsetof(struct __sk_buff, family)),
+ __imm_const(__sk_buff_local_ip4, offsetof(struct __sk_buff, local_ip4)),
+ __imm_const(__sk_buff_local_ip6_0, offsetof(struct __sk_buff, local_ip6[0])),
+ __imm_const(__sk_buff_local_ip6_1, offsetof(struct __sk_buff, local_ip6[1])),
+ __imm_const(__sk_buff_local_ip6_2, offsetof(struct __sk_buff, local_ip6[2])),
+ __imm_const(__sk_buff_local_ip6_3, offsetof(struct __sk_buff, local_ip6[3])),
+ __imm_const(__sk_buff_local_port, offsetof(struct __sk_buff, local_port)),
+ __imm_const(__sk_buff_remote_ip4, offsetof(struct __sk_buff, remote_ip4)),
+ __imm_const(__sk_buff_remote_ip6_0, offsetof(struct __sk_buff, remote_ip6[0])),
+ __imm_const(__sk_buff_remote_ip6_1, offsetof(struct __sk_buff, remote_ip6[1])),
+ __imm_const(__sk_buff_remote_ip6_2, offsetof(struct __sk_buff, remote_ip6[2])),
+ __imm_const(__sk_buff_remote_ip6_3, offsetof(struct __sk_buff, remote_ip6[3])),
+ __imm_const(__sk_buff_remote_port, offsetof(struct __sk_buff, remote_port))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid access of tc_classid for CGROUP_SKB")
+__failure __msg("invalid bpf_context access")
+__failure_unpriv
+__naked void tc_classid_for_cgroup_skb(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid access of data_meta for CGROUP_SKB")
+__failure __msg("invalid bpf_context access")
+__failure_unpriv
+__naked void data_meta_for_cgroup_skb(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[__sk_buff_data_meta]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data_meta, offsetof(struct __sk_buff, data_meta))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid access of flow_keys for CGROUP_SKB")
+__failure __msg("invalid bpf_context access")
+__failure_unpriv
+__naked void flow_keys_for_cgroup_skb(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[__sk_buff_flow_keys]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_flow_keys, offsetof(struct __sk_buff, flow_keys))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid write access to napi_id for CGROUP_SKB")
+__failure __msg("invalid bpf_context access")
+__failure_unpriv
+__naked void napi_id_for_cgroup_skb(void)
+{
+ asm volatile (" \
+ r9 = *(u32*)(r1 + %[__sk_buff_napi_id]); \
+ *(u32*)(r1 + %[__sk_buff_napi_id]) = r9; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("write tstamp from CGROUP_SKB")
+__success __failure_unpriv
+__msg_unpriv("invalid bpf_context access off=152 size=8")
+__retval(0)
+__naked void write_tstamp_from_cgroup_skb(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ *(u64*)(r1 + %[__sk_buff_tstamp]) = r0; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("read tstamp from CGROUP_SKB")
+__success __success_unpriv __retval(0)
+__naked void read_tstamp_from_cgroup_skb(void)
+{
+ asm volatile (" \
+ r0 = *(u64*)(r1 + %[__sk_buff_tstamp]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_cgroup_storage.c b/tools/testing/selftests/bpf/progs/verifier_cgroup_storage.c
new file mode 100644
index 000000000000..9a13f5c11ac7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_cgroup_storage.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/cgroup_storage.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
+ __uint(max_entries, 0);
+ __type(key, struct bpf_cgroup_storage_key);
+ __type(value, char[TEST_DATA_LEN]);
+} cgroup_storage SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
+ __uint(max_entries, 0);
+ __type(key, struct bpf_cgroup_storage_key);
+ __type(value, char[64]);
+} percpu_cgroup_storage SEC(".maps");
+
+SEC("cgroup/skb")
+__description("valid cgroup storage access")
+__success __success_unpriv __retval(0)
+__naked void valid_cgroup_storage_access(void)
+{
+ asm volatile (" \
+ r2 = 0; \
+ r1 = %[cgroup_storage] ll; \
+ call %[bpf_get_local_storage]; \
+ r1 = *(u32*)(r0 + 0); \
+ r0 = r1; \
+ r0 &= 1; \
+ exit; \
+" :
+ : __imm(bpf_get_local_storage),
+ __imm_addr(cgroup_storage)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid cgroup storage access 1")
+__failure __msg("cannot pass map_type 1 into func bpf_get_local_storage")
+__failure_unpriv
+__naked void invalid_cgroup_storage_access_1(void)
+{
+ asm volatile (" \
+ r2 = 0; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_get_local_storage]; \
+ r1 = *(u32*)(r0 + 0); \
+ r0 = r1; \
+ r0 &= 1; \
+ exit; \
+" :
+ : __imm(bpf_get_local_storage),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid cgroup storage access 2")
+__failure __msg("fd 1 is not pointing to valid bpf_map")
+__failure_unpriv
+__naked void invalid_cgroup_storage_access_2(void)
+{
+ asm volatile (" \
+ r2 = 0; \
+ .8byte %[ld_map_fd]; \
+ .8byte 0; \
+ call %[bpf_get_local_storage]; \
+ r0 &= 1; \
+ exit; \
+" :
+ : __imm(bpf_get_local_storage),
+ __imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 1))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid cgroup storage access 3")
+__failure __msg("invalid access to map value, value_size=64 off=256 size=4")
+__failure_unpriv
+__naked void invalid_cgroup_storage_access_3(void)
+{
+ asm volatile (" \
+ r2 = 0; \
+ r1 = %[cgroup_storage] ll; \
+ call %[bpf_get_local_storage]; \
+ r1 = *(u32*)(r0 + 256); \
+ r1 += 1; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_local_storage),
+ __imm_addr(cgroup_storage)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid cgroup storage access 4")
+__failure __msg("invalid access to map value, value_size=64 off=-2 size=4")
+__failure_unpriv
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void invalid_cgroup_storage_access_4(void)
+{
+ asm volatile (" \
+ r2 = 0; \
+ r1 = %[cgroup_storage] ll; \
+ call %[bpf_get_local_storage]; \
+ r1 = *(u32*)(r0 - 2); \
+ r0 = r1; \
+ r1 += 1; \
+ exit; \
+" :
+ : __imm(bpf_get_local_storage),
+ __imm_addr(cgroup_storage)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid cgroup storage access 5")
+__failure __msg("get_local_storage() doesn't support non-zero flags")
+__failure_unpriv
+__naked void invalid_cgroup_storage_access_5(void)
+{
+ asm volatile (" \
+ r2 = 7; \
+ r1 = %[cgroup_storage] ll; \
+ call %[bpf_get_local_storage]; \
+ r1 = *(u32*)(r0 + 0); \
+ r0 = r1; \
+ r0 &= 1; \
+ exit; \
+" :
+ : __imm(bpf_get_local_storage),
+ __imm_addr(cgroup_storage)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid cgroup storage access 6")
+__failure __msg("get_local_storage() doesn't support non-zero flags")
+__msg_unpriv("R2 leaks addr into helper function")
+__naked void invalid_cgroup_storage_access_6(void)
+{
+ asm volatile (" \
+ r2 = r1; \
+ r1 = %[cgroup_storage] ll; \
+ call %[bpf_get_local_storage]; \
+ r1 = *(u32*)(r0 + 0); \
+ r0 = r1; \
+ r0 &= 1; \
+ exit; \
+" :
+ : __imm(bpf_get_local_storage),
+ __imm_addr(cgroup_storage)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("valid per-cpu cgroup storage access")
+__success __success_unpriv __retval(0)
+__naked void per_cpu_cgroup_storage_access(void)
+{
+ asm volatile (" \
+ r2 = 0; \
+ r1 = %[percpu_cgroup_storage] ll; \
+ call %[bpf_get_local_storage]; \
+ r1 = *(u32*)(r0 + 0); \
+ r0 = r1; \
+ r0 &= 1; \
+ exit; \
+" :
+ : __imm(bpf_get_local_storage),
+ __imm_addr(percpu_cgroup_storage)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid per-cpu cgroup storage access 1")
+__failure __msg("cannot pass map_type 1 into func bpf_get_local_storage")
+__failure_unpriv
+__naked void cpu_cgroup_storage_access_1(void)
+{
+ asm volatile (" \
+ r2 = 0; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_get_local_storage]; \
+ r1 = *(u32*)(r0 + 0); \
+ r0 = r1; \
+ r0 &= 1; \
+ exit; \
+" :
+ : __imm(bpf_get_local_storage),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid per-cpu cgroup storage access 2")
+__failure __msg("fd 1 is not pointing to valid bpf_map")
+__failure_unpriv
+__naked void cpu_cgroup_storage_access_2(void)
+{
+ asm volatile (" \
+ r2 = 0; \
+ .8byte %[ld_map_fd]; \
+ .8byte 0; \
+ call %[bpf_get_local_storage]; \
+ r0 &= 1; \
+ exit; \
+" :
+ : __imm(bpf_get_local_storage),
+ __imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 1))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid per-cpu cgroup storage access 3")
+__failure __msg("invalid access to map value, value_size=64 off=256 size=4")
+__failure_unpriv
+__naked void cpu_cgroup_storage_access_3(void)
+{
+ asm volatile (" \
+ r2 = 0; \
+ r1 = %[percpu_cgroup_storage] ll; \
+ call %[bpf_get_local_storage]; \
+ r1 = *(u32*)(r0 + 256); \
+ r1 += 1; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_local_storage),
+ __imm_addr(percpu_cgroup_storage)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid per-cpu cgroup storage access 4")
+__failure __msg("invalid access to map value, value_size=64 off=-2 size=4")
+__failure_unpriv
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void cpu_cgroup_storage_access_4(void)
+{
+ asm volatile (" \
+ r2 = 0; \
+ r1 = %[cgroup_storage] ll; \
+ call %[bpf_get_local_storage]; \
+ r1 = *(u32*)(r0 - 2); \
+ r0 = r1; \
+ r1 += 1; \
+ exit; \
+" :
+ : __imm(bpf_get_local_storage),
+ __imm_addr(cgroup_storage)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid per-cpu cgroup storage access 5")
+__failure __msg("get_local_storage() doesn't support non-zero flags")
+__failure_unpriv
+__naked void cpu_cgroup_storage_access_5(void)
+{
+ asm volatile (" \
+ r2 = 7; \
+ r1 = %[percpu_cgroup_storage] ll; \
+ call %[bpf_get_local_storage]; \
+ r1 = *(u32*)(r0 + 0); \
+ r0 = r1; \
+ r0 &= 1; \
+ exit; \
+" :
+ : __imm(bpf_get_local_storage),
+ __imm_addr(percpu_cgroup_storage)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("invalid per-cpu cgroup storage access 6")
+__failure __msg("get_local_storage() doesn't support non-zero flags")
+__msg_unpriv("R2 leaks addr into helper function")
+__naked void cpu_cgroup_storage_access_6(void)
+{
+ asm volatile (" \
+ r2 = r1; \
+ r1 = %[percpu_cgroup_storage] ll; \
+ call %[bpf_get_local_storage]; \
+ r1 = *(u32*)(r0 + 0); \
+ r0 = r1; \
+ r0 &= 1; \
+ exit; \
+" :
+ : __imm(bpf_get_local_storage),
+ __imm_addr(percpu_cgroup_storage)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_const.c b/tools/testing/selftests/bpf/progs/verifier_const.c
new file mode 100644
index 000000000000..e118dbb768bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_const.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Isovalent */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+const volatile long foo = 42;
+long bar;
+long bart = 96;
+
+SEC("tc/ingress")
+__description("rodata/strtol: write rejected")
+__failure __msg("write into map forbidden")
+int tcx1(struct __sk_buff *skb)
+{
+ char buff[] = { '8', '4', '\0' };
+ bpf_strtol(buff, sizeof(buff), 0, (long *)&foo);
+ return TCX_PASS;
+}
+
+SEC("tc/ingress")
+__description("bss/strtol: write accepted")
+__success
+int tcx2(struct __sk_buff *skb)
+{
+ char buff[] = { '8', '4', '\0' };
+ bpf_strtol(buff, sizeof(buff), 0, &bar);
+ return TCX_PASS;
+}
+
+SEC("tc/ingress")
+__description("data/strtol: write accepted")
+__success
+int tcx3(struct __sk_buff *skb)
+{
+ char buff[] = { '8', '4', '\0' };
+ bpf_strtol(buff, sizeof(buff), 0, &bart);
+ return TCX_PASS;
+}
+
+SEC("tc/ingress")
+__description("rodata/mtu: write rejected")
+__failure __msg("write into map forbidden")
+int tcx4(struct __sk_buff *skb)
+{
+ bpf_check_mtu(skb, skb->ifindex, (__u32 *)&foo, 0, 0);
+ return TCX_PASS;
+}
+
+SEC("tc/ingress")
+__description("bss/mtu: write accepted")
+__success
+int tcx5(struct __sk_buff *skb)
+{
+ bpf_check_mtu(skb, skb->ifindex, (__u32 *)&bar, 0, 0);
+ return TCX_PASS;
+}
+
+SEC("tc/ingress")
+__description("data/mtu: write accepted")
+__success
+int tcx6(struct __sk_buff *skb)
+{
+ bpf_check_mtu(skb, skb->ifindex, (__u32 *)&bart, 0, 0);
+ return TCX_PASS;
+}
+
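+/* Both helpers below write through their pointer argument (a fixed 4-byte
+ * store and a variable-length copy); when that pointer targets the
+ * read-only .rodata map ('foo'), the verifier must reject the write.
+ */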
+static inline void write_fixed(volatile void *p, __u32 val)
+{
+ *(volatile __u32 *)p = val;
+}
+
+static inline void write_dyn(void *p, void *val, int len)
+{
+ bpf_copy_from_user(p, len, val);
+}
+
+SEC("tc/ingress")
+__description("rodata/mark: write with unknown reg rejected")
+__failure __msg("write into map forbidden")
+int tcx7(struct __sk_buff *skb)
+{
+ write_fixed((void *)&foo, skb->mark);
+ return TCX_PASS;
+}
+
+SEC("lsm.s/bprm_committed_creds")
+__description("rodata/mark: write with unknown reg rejected")
+__failure __msg("write into map forbidden")
+int BPF_PROG(bprm, struct linux_binprm *bprm)
+{
+ write_dyn((void *)&foo, &bart, bpf_get_prandom_u32() & 3);
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_const_or.c b/tools/testing/selftests/bpf/progs/verifier_const_or.c
new file mode 100644
index 000000000000..68c568c3c3a0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_const_or.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/const_or.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("tracepoint")
+__description("constant register |= constant should keep constant type")
+__success
+__naked void constant_should_keep_constant_type(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -48; \
+ r2 = 34; \
+ r2 |= 13; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("constant register |= constant should not bypass stack boundary checks")
+__failure __msg("invalid write to stack R1 off=-48 size=58")
+__naked void not_bypass_stack_boundary_checks_1(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -48; \
+ r2 = 34; \
+ r2 |= 24; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("constant register |= constant register should keep constant type")
+__success
+__naked void register_should_keep_constant_type(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -48; \
+ r2 = 34; \
+ r4 = 13; \
+ r2 |= r4; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("constant register |= constant register should not bypass stack boundary checks")
+__failure __msg("invalid write to stack R1 off=-48 size=58")
+__naked void not_bypass_stack_boundary_checks_2(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -48; \
+ r2 = 34; \
+ r4 = 24; \
+ r2 |= r4; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c
new file mode 100644
index 000000000000..424463094760
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/ctx.c */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+
+SEC("tc")
+__description("context stores via BPF_ATOMIC")
+__failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed")
+__naked void context_stores_via_bpf_atomic(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ lock *(u32 *)(r1 + %[__sk_buff_mark]) += w0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("arithmetic ops make PTR_TO_CTX unusable")
+__failure __msg("dereference of modified ctx ptr")
+__naked void make_ptr_to_ctx_unusable(void)
+{
+ asm volatile (" \
+ r1 += %[__imm_0]; \
+ r0 = *(u32*)(r1 + %[__sk_buff_mark]); \
+ exit; \
+" :
+ : __imm_const(__imm_0,
+ offsetof(struct __sk_buff, data) - offsetof(struct __sk_buff, mark)),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("pass unmodified ctx pointer to helper")
+__success __retval(0)
+__naked void unmodified_ctx_pointer_to_helper(void)
+{
+ asm volatile (" \
+ r2 = 0; \
+ call %[bpf_csum_update]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_csum_update)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("pass modified ctx pointer to helper, 1")
+__failure __msg("negative offset ctx ptr R1 off=-612 disallowed")
+__naked void ctx_pointer_to_helper_1(void)
+{
+ asm volatile (" \
+ r1 += -612; \
+ r2 = 0; \
+ call %[bpf_csum_update]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_csum_update)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("pass modified ctx pointer to helper, 2")
+__failure __msg("negative offset ctx ptr R1 off=-612 disallowed")
+__failure_unpriv __msg_unpriv("negative offset ctx ptr R1 off=-612 disallowed")
+__naked void ctx_pointer_to_helper_2(void)
+{
+ asm volatile (" \
+ r1 += -612; \
+ call %[bpf_get_socket_cookie]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_socket_cookie)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("pass modified ctx pointer to helper, 3")
+__failure __msg("variable ctx access var_off=(0x0; 0x4)")
+__naked void ctx_pointer_to_helper_3(void)
+{
+ asm volatile (" \
+ r3 = *(u32*)(r1 + 0); \
+ r3 &= 4; \
+ r1 += r3; \
+ r2 = 0; \
+ call %[bpf_csum_update]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_csum_update)
+ : __clobber_all);
+}
+
+SEC("cgroup/sendmsg6")
+__description("pass ctx or null check, 1: ctx")
+__success
+__naked void or_null_check_1_ctx(void)
+{
+ asm volatile (" \
+ call %[bpf_get_netns_cookie]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_netns_cookie)
+ : __clobber_all);
+}
+
+SEC("cgroup/sendmsg6")
+__description("pass ctx or null check, 2: null")
+__success
+__naked void or_null_check_2_null(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ call %[bpf_get_netns_cookie]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_netns_cookie)
+ : __clobber_all);
+}
+
+SEC("cgroup/sendmsg6")
+__description("pass ctx or null check, 3: 1")
+__failure __msg("R1 type=scalar expected=ctx")
+__naked void or_null_check_3_1(void)
+{
+ asm volatile (" \
+ r1 = 1; \
+ call %[bpf_get_netns_cookie]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_netns_cookie)
+ : __clobber_all);
+}
+
+SEC("cgroup/sendmsg6")
+__description("pass ctx or null check, 4: ctx - const")
+__failure __msg("negative offset ctx ptr R1 off=-612 disallowed")
+__naked void null_check_4_ctx_const(void)
+{
+ asm volatile (" \
+ r1 += -612; \
+ call %[bpf_get_netns_cookie]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_netns_cookie)
+ : __clobber_all);
+}
+
+SEC("cgroup/connect4")
+__description("pass ctx or null check, 5: null (connect)")
+__success
+__naked void null_check_5_null_connect(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ call %[bpf_get_netns_cookie]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_netns_cookie)
+ : __clobber_all);
+}
+
+SEC("cgroup/post_bind4")
+__description("pass ctx or null check, 6: null (bind)")
+__success
+__naked void null_check_6_null_bind(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ call %[bpf_get_netns_cookie]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_netns_cookie)
+ : __clobber_all);
+}
+
+SEC("cgroup/post_bind4")
+__description("pass ctx or null check, 7: ctx (bind)")
+__success
+__naked void null_check_7_ctx_bind(void)
+{
+ asm volatile (" \
+ call %[bpf_get_socket_cookie]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_socket_cookie)
+ : __clobber_all);
+}
+
+SEC("cgroup/post_bind4")
+__description("pass ctx or null check, 8: null (bind)")
+__failure __msg("R1 type=scalar expected=ctx")
+__naked void null_check_8_null_bind(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ call %[bpf_get_socket_cookie]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_socket_cookie)
+ : __clobber_all);
+}
+
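+/* Each expansion below loads 4 bytes at (field offset + 4), i.e. the upper
+ * half of an 8-byte context field, which must be rejected as an invalid
+ * bpf_context access.
+ */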
+#define narrow_load(type, ctx, field) \
+ SEC(type) \
+ __description("narrow load on field " #field " of " #ctx) \
+ __failure __msg("invalid bpf_context access") \
+ __naked void invalid_narrow_load##ctx##field(void) \
+ { \
+ asm volatile (" \
+ r1 = *(u32 *)(r1 + %[off]); \
+ r0 = 0; \
+ exit;" \
+ : \
+ : __imm_const(off, offsetof(struct ctx, field) + 4) \
+ : __clobber_all); \
+ }
+
+narrow_load("cgroup/getsockopt", bpf_sockopt, sk);
+narrow_load("cgroup/getsockopt", bpf_sockopt, optval);
+narrow_load("cgroup/getsockopt", bpf_sockopt, optval_end);
+narrow_load("tc", __sk_buff, sk);
+narrow_load("cgroup/bind4", bpf_sock_addr, sk);
+narrow_load("sockops", bpf_sock_ops, sk);
+narrow_load("sockops", bpf_sock_ops, skb_data);
+narrow_load("sockops", bpf_sock_ops, skb_data_end);
+narrow_load("sockops", bpf_sock_ops, skb_hwtstamp);
+
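+/* Each expansion below loads the full field width at (field offset + 1);
+ * the misaligned context access must be rejected.
+ */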
+#define unaligned_access(type, ctx, field) \
+ SEC(type) \
+ __description("unaligned access on field " #field " of " #ctx) \
+ __failure __msg("invalid bpf_context access") \
+ __naked void unaligned_ctx_access_##ctx##field(void) \
+ { \
+ asm volatile (" \
+ r1 = *(u%[size] *)(r1 + %[off]); \
+ r0 = 0; \
+ exit;" \
+ : \
+ : __imm_const(size, sizeof_field(struct ctx, field) * 8), \
+ __imm_const(off, offsetof(struct ctx, field) + 1) \
+ : __clobber_all); \
+ }
+
+unaligned_access("flow_dissector", __sk_buff, data);
+unaligned_access("netfilter", bpf_nf_ctx, skb);
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c b/tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c
new file mode 100644
index 000000000000..65edc89799f9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/ctx_sk_msg.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("sk_msg")
+__description("valid access family in SK_MSG")
+__success
+__naked void access_family_in_sk_msg(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[sk_msg_md_family]); \
+ exit; \
+" :
+ : __imm_const(sk_msg_md_family, offsetof(struct sk_msg_md, family))
+ : __clobber_all);
+}
+
+SEC("sk_msg")
+__description("valid access remote_ip4 in SK_MSG")
+__success
+__naked void remote_ip4_in_sk_msg(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip4]); \
+ exit; \
+" :
+ : __imm_const(sk_msg_md_remote_ip4, offsetof(struct sk_msg_md, remote_ip4))
+ : __clobber_all);
+}
+
+SEC("sk_msg")
+__description("valid access local_ip4 in SK_MSG")
+__success
+__naked void local_ip4_in_sk_msg(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[sk_msg_md_local_ip4]); \
+ exit; \
+" :
+ : __imm_const(sk_msg_md_local_ip4, offsetof(struct sk_msg_md, local_ip4))
+ : __clobber_all);
+}
+
+SEC("sk_msg")
+__description("valid access remote_port in SK_MSG")
+__success
+__naked void remote_port_in_sk_msg(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[sk_msg_md_remote_port]); \
+ exit; \
+" :
+ : __imm_const(sk_msg_md_remote_port, offsetof(struct sk_msg_md, remote_port))
+ : __clobber_all);
+}
+
+SEC("sk_msg")
+__description("valid access local_port in SK_MSG")
+__success
+__naked void local_port_in_sk_msg(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[sk_msg_md_local_port]); \
+ exit; \
+" :
+ : __imm_const(sk_msg_md_local_port, offsetof(struct sk_msg_md, local_port))
+ : __clobber_all);
+}
+
+SEC("sk_skb")
+__description("valid access remote_ip6 in SK_MSG")
+__success
+__naked void remote_ip6_in_sk_msg(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_0]); \
+ r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_1]); \
+ r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_2]); \
+ r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_3]); \
+ exit; \
+" :
+ : __imm_const(sk_msg_md_remote_ip6_0, offsetof(struct sk_msg_md, remote_ip6[0])),
+ __imm_const(sk_msg_md_remote_ip6_1, offsetof(struct sk_msg_md, remote_ip6[1])),
+ __imm_const(sk_msg_md_remote_ip6_2, offsetof(struct sk_msg_md, remote_ip6[2])),
+ __imm_const(sk_msg_md_remote_ip6_3, offsetof(struct sk_msg_md, remote_ip6[3]))
+ : __clobber_all);
+}
+
+SEC("sk_skb")
+__description("valid access local_ip6 in SK_MSG")
+__success
+__naked void local_ip6_in_sk_msg(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_0]); \
+ r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_1]); \
+ r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_2]); \
+ r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_3]); \
+ exit; \
+" :
+ : __imm_const(sk_msg_md_local_ip6_0, offsetof(struct sk_msg_md, local_ip6[0])),
+ __imm_const(sk_msg_md_local_ip6_1, offsetof(struct sk_msg_md, local_ip6[1])),
+ __imm_const(sk_msg_md_local_ip6_2, offsetof(struct sk_msg_md, local_ip6[2])),
+ __imm_const(sk_msg_md_local_ip6_3, offsetof(struct sk_msg_md, local_ip6[3]))
+ : __clobber_all);
+}
+
+SEC("sk_msg")
+__description("valid access size in SK_MSG")
+__success
+__naked void access_size_in_sk_msg(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[sk_msg_md_size]); \
+ exit; \
+" :
+ : __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size))
+ : __clobber_all);
+}
+
+SEC("sk_msg")
+__description("invalid 64B read of size in SK_MSG")
+__failure __msg("invalid bpf_context access")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void of_size_in_sk_msg(void)
+{
+ asm volatile (" \
+ r2 = *(u64*)(r1 + %[sk_msg_md_size]); \
+ exit; \
+" :
+ : __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size))
+ : __clobber_all);
+}
+
+SEC("sk_msg")
+__description("invalid read past end of SK_MSG")
+__failure __msg("invalid bpf_context access")
+__naked void past_end_of_sk_msg(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__imm_0]); \
+ exit; \
+" :
+ : __imm_const(__imm_0, offsetof(struct sk_msg_md, size) + 4)
+ : __clobber_all);
+}
+
+SEC("sk_msg")
+__description("invalid read offset in SK_MSG")
+__failure __msg("invalid bpf_context access")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void read_offset_in_sk_msg(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__imm_0]); \
+ exit; \
+" :
+ : __imm_const(__imm_0, offsetof(struct sk_msg_md, family) + 1)
+ : __clobber_all);
+}
+
+SEC("sk_msg")
+__description("direct packet read for SK_MSG")
+__success
+__naked void packet_read_for_sk_msg(void)
+{
+ asm volatile (" \
+ r2 = *(u64*)(r1 + %[sk_msg_md_data]); \
+ r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
+ __imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
+ : __clobber_all);
+}
+
+SEC("sk_msg")
+__description("direct packet write for SK_MSG")
+__success
+__naked void packet_write_for_sk_msg(void)
+{
+ asm volatile (" \
+ r2 = *(u64*)(r1 + %[sk_msg_md_data]); \
+ r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ *(u8*)(r2 + 0) = r2; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
+ __imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
+ : __clobber_all);
+}
+
+SEC("sk_msg")
+__description("overlapping checks for direct packet access SK_MSG")
+__success
+__naked void direct_packet_access_sk_msg(void)
+{
+ asm volatile (" \
+ r2 = *(u64*)(r1 + %[sk_msg_md_data]); \
+ r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r1 = r2; \
+ r1 += 6; \
+ if r1 > r3 goto l0_%=; \
+ r0 = *(u16*)(r2 + 6); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
+ __imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_d_path.c b/tools/testing/selftests/bpf/progs/verifier_d_path.c
new file mode 100644
index 000000000000..87e51a215558
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_d_path.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/d_path.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
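+/* bpf_d_path() may only be called from an allowlisted set of attach points;
+ * fentry/dentry_open is in that set, fentry/d_path is not.
+ */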
+SEC("fentry/dentry_open")
+__description("d_path accept")
+__success __retval(0)
+__naked void d_path_accept(void)
+{
+ asm volatile (" \
+ r1 = *(u64 *)(r1 + 0); \
+ r2 = r10; \
+ r2 += -8; \
+ r6 = 0; \
+ *(u64*)(r2 + 0) = r6; \
+ r3 = 8 ll; \
+ call %[bpf_d_path]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_d_path)
+ : __clobber_all);
+}
+
+SEC("fentry/d_path")
+__description("d_path reject")
+__failure __msg("helper call is not allowed in probe")
+__naked void d_path_reject(void)
+{
+ asm volatile (" \
+ r1 = *(u64 *)(r1 + 0); \
+ r2 = r10; \
+ r2 += -8; \
+ r6 = 0; \
+ *(u64*)(r2 + 0) = r6; \
+ r3 = 8 ll; \
+ call %[bpf_d_path]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_d_path)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c
new file mode 100644
index 000000000000..28b602ac9cbe
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c
@@ -0,0 +1,803 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/direct_packet_access.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("tc")
+__description("pkt_end - pkt_start is allowed")
+__success __retval(TEST_DATA_LEN)
+__naked void end_pkt_start_is_allowed(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r0 -= r2; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test1")
+__success __retval(0)
+__naked void direct_packet_access_test1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test2")
+__success __retval(0)
+__naked void direct_packet_access_test2(void)
+{
+ asm volatile (" \
+ r0 = 1; \
+ r4 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r5 = r3; \
+ r5 += 14; \
+ if r5 > r4 goto l0_%=; \
+ r0 = *(u8*)(r3 + 7); \
+ r4 = *(u8*)(r3 + 12); \
+ r4 *= 14; \
+ r3 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 += r4; \
+ r2 = *(u32*)(r1 + %[__sk_buff_len]); \
+ r2 <<= 49; \
+ r2 >>= 49; \
+ r3 += r2; \
+ r2 = r3; \
+ r2 += 8; \
+ r1 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ if r2 > r1 goto l1_%=; \
+ r1 = *(u8*)(r3 + 4); \
+l1_%=: r0 = 0; \
+l0_%=: exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
+ __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("direct packet access: test3")
+__failure __msg("invalid bpf_context access off=76")
+__failure_unpriv
+__naked void direct_packet_access_test3(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test4 (write)")
+__success __retval(0)
+__naked void direct_packet_access_test4_write(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ *(u8*)(r2 + 0) = r2; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test5 (pkt_end >= reg, good access)")
+__success __retval(0)
+__naked void pkt_end_reg_good_access(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r3 >= r0 goto l0_%=; \
+ r0 = 1; \
+ exit; \
+l0_%=: r0 = *(u8*)(r2 + 0); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test6 (pkt_end >= reg, bad access)")
+__failure __msg("invalid access to packet")
+__naked void pkt_end_reg_bad_access(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r3 >= r0 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+ r0 = 1; \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test7 (pkt_end >= reg, both accesses)")
+__failure __msg("invalid access to packet")
+__naked void pkt_end_reg_both_accesses(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r3 >= r0 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+ r0 = 1; \
+ exit; \
+l0_%=: r0 = *(u8*)(r2 + 0); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test8 (double test, variant 1)")
+__success __retval(0)
+__naked void test8_double_test_variant_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r3 >= r0 goto l0_%=; \
+ if r0 > r3 goto l1_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l1_%=: r0 = 1; \
+ exit; \
+l0_%=: r0 = *(u8*)(r2 + 0); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test9 (double test, variant 2)")
+__success __retval(0)
+__naked void test9_double_test_variant_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r3 >= r0 goto l0_%=; \
+ r0 = 1; \
+ exit; \
+l0_%=: if r0 > r3 goto l1_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l1_%=: r0 = *(u8*)(r2 + 0); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test10 (write invalid)")
+__failure __msg("invalid access to packet")
+__naked void packet_access_test10_write_invalid(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: *(u8*)(r2 + 0) = r2; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test11 (shift, good access)")
+__success __retval(1)
+__naked void access_test11_shift_good_access(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 22; \
+ if r0 > r3 goto l0_%=; \
+ r3 = 144; \
+ r5 = r3; \
+ r5 += 23; \
+ r5 >>= 3; \
+ r6 = r2; \
+ r6 += r5; \
+ r0 = 1; \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test12 (and, good access)")
+__success __retval(1)
+__naked void access_test12_and_good_access(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 22; \
+ if r0 > r3 goto l0_%=; \
+ r3 = 144; \
+ r5 = r3; \
+ r5 += 23; \
+ r5 &= 15; \
+ r6 = r2; \
+ r6 += r5; \
+ r0 = 1; \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test13 (branches, good access)")
+__success __retval(1)
+__naked void access_test13_branches_good_access(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 22; \
+ if r0 > r3 goto l0_%=; \
+ r3 = *(u32*)(r1 + %[__sk_buff_mark]); \
+ r4 = 1; \
+ if r3 > r4 goto l1_%=; \
+ r3 = 14; \
+ goto l2_%=; \
+l1_%=: r3 = 24; \
+l2_%=: r5 = r3; \
+ r5 += 23; \
+ r5 &= 15; \
+ r6 = r2; \
+ r6 += r5; \
+ r0 = 1; \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)")
+__success __retval(1)
+__naked void _0_const_imm_good_access(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 22; \
+ if r0 > r3 goto l0_%=; \
+ r5 = 12; \
+ r5 >>= 4; \
+ r6 = r2; \
+ r6 += r5; \
+ r0 = *(u8*)(r6 + 0); \
+ r0 = 1; \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test15 (spill with xadd)")
+__failure __msg("R2 invalid mem access 'scalar'")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void access_test15_spill_with_xadd(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r5 = 4096; \
+ r4 = r10; \
+ r4 += -8; \
+ *(u64*)(r4 + 0) = r2; \
+ lock *(u64 *)(r4 + 0) += r5; \
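+	/* the atomic add above clobbers the spilled pointer, so r2 reloaded below is a scalar */ \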
+ r2 = *(u64*)(r4 + 0); \
+ *(u32*)(r2 + 0) = r5; \
+ r0 = 0; \
+l0_%=: exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test16 (arith on data_end)")
+__failure __msg("R3 pointer arithmetic on pkt_end")
+__naked void test16_arith_on_data_end(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ r3 += 16; \
+ if r0 > r3 goto l0_%=; \
+ *(u8*)(r2 + 0) = r2; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test17 (pruning, alignment)")
+__failure __msg("misaligned packet access off 2+0+15+-4 size 4")
+__flag(BPF_F_STRICT_ALIGNMENT)
+__naked void packet_access_test17_pruning_alignment(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_mark]); \
+ r0 = r2; \
+ r0 += 14; \
+ if r7 > 1 goto l0_%=; \
+l2_%=: if r0 > r3 goto l1_%=; \
+ *(u32*)(r0 - 4) = r0; \
+l1_%=: r0 = 0; \
+ exit; \
+l0_%=: r0 += 1; \
+ goto l2_%=; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test18 (imm += pkt_ptr, 1)")
+__success __retval(0)
+__naked void test18_imm_pkt_ptr_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = 8; \
+ r0 += r2; \
+ if r0 > r3 goto l0_%=; \
+ *(u8*)(r2 + 0) = r2; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test19 (imm += pkt_ptr, 2)")
+__success __retval(0)
+__naked void test19_imm_pkt_ptr_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r4 = 4; \
+ r4 += r2; \
+ *(u8*)(r4 + 0) = r4; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test20 (x += pkt_ptr, 1)")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void test20_x_pkt_ptr_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = 0xffffffff; \
+ *(u64*)(r10 - 8) = r0; \
+ r0 = *(u64*)(r10 - 8); \
+ r0 &= 0x7fff; \
+ r4 = r0; \
+ r4 += r2; \
+ r5 = r4; \
+ r4 += %[__imm_0]; \
+ if r4 > r3 goto l0_%=; \
+ *(u64*)(r5 + 0) = r4; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 0x7fff - 1),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test21 (x += pkt_ptr, 2)")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void test21_x_pkt_ptr_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r4 = 0xffffffff; \
+ *(u64*)(r10 - 8) = r4; \
+ r4 = *(u64*)(r10 - 8); \
+ r4 &= 0x7fff; \
+ r4 += r2; \
+ r5 = r4; \
+ r4 += %[__imm_0]; \
+ if r4 > r3 goto l0_%=; \
+ *(u64*)(r5 + 0) = r4; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 0x7fff - 1),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test22 (x += pkt_ptr, 3)")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void test22_x_pkt_ptr_3(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ *(u64*)(r10 - 8) = r2; \
+ *(u64*)(r10 - 16) = r3; \
+ r3 = *(u64*)(r10 - 16); \
+ if r0 > r3 goto l0_%=; \
+ r2 = *(u64*)(r10 - 8); \
+ r4 = 0xffffffff; \
+ lock *(u64 *)(r10 - 8) += r4; \
+ r4 = *(u64*)(r10 - 8); \
+ r4 >>= 49; \
+ r4 += r2; \
+ r0 = r4; \
+ r0 += 2; \
+ if r0 > r3 goto l0_%=; \
+ r2 = 1; \
+ *(u16*)(r4 + 0) = r2; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test23 (x += pkt_ptr, 4)")
+__failure __msg("invalid access to packet, off=0 size=8, R5(id=3,off=0,r=0)")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void test23_x_pkt_ptr_4(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = *(u32*)(r1 + %[__sk_buff_mark]); \
+ *(u64*)(r10 - 8) = r0; \
+ r0 = *(u64*)(r10 - 8); \
+ r0 &= 0xffff; \
+ r4 = r0; \
+ r0 = 31; \
+ r0 += r4; \
+ r0 += r2; \
+ r5 = r0; \
+ r0 += %[__imm_0]; \
+ if r0 > r3 goto l0_%=; \
+ *(u64*)(r5 + 0) = r0; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 0xffff - 1),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test24 (x += pkt_ptr, 5)")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void test24_x_pkt_ptr_5(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = 0xffffffff; \
+ *(u64*)(r10 - 8) = r0; \
+ r0 = *(u64*)(r10 - 8); \
+ r0 &= 0xff; \
+ r4 = r0; \
+ r0 = 64; \
+ r0 += r4; \
+ r0 += r2; \
+ r5 = r0; \
+ r0 += %[__imm_0]; \
+ if r0 > r3 goto l0_%=; \
+ *(u64*)(r5 + 0) = r0; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 0x7fff - 1),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test25 (marking on <, good access)")
+__success __retval(0)
+__naked void test25_marking_on_good_access(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 < r3 goto l0_%=; \
+l1_%=: r0 = 0; \
+ exit; \
+l0_%=: r0 = *(u8*)(r2 + 0); \
+ goto l1_%=; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test26 (marking on <, bad access)")
+__failure __msg("invalid access to packet")
+__naked void test26_marking_on_bad_access(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 < r3 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l1_%=: r0 = 0; \
+ exit; \
+l0_%=: goto l1_%=; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test27 (marking on <=, good access)")
+__success __retval(1)
+__naked void test27_marking_on_good_access(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r3 <= r0 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test28 (marking on <=, bad access)")
+__failure __msg("invalid access to packet")
+__naked void test28_marking_on_bad_access(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r3 <= r0 goto l0_%=; \
+l1_%=: r0 = 1; \
+ exit; \
+l0_%=: r0 = *(u8*)(r2 + 0); \
+ goto l1_%=; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test29 (reg > pkt_end in subprog)")
+__success __retval(0)
+__naked void reg_pkt_end_in_subprog(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r2 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r3 = r6; \
+ r3 += 8; \
+ call reg_pkt_end_in_subprog__1; \
+ if r0 == 0 goto l0_%=; \
+ r0 = *(u8*)(r6 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void reg_pkt_end_in_subprog__1(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ if r3 > r2 goto l0_%=; \
+ r0 = 1; \
+l0_%=: exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: test30 (check_id() in regsafe(), bad access)")
+__failure __msg("invalid access to packet, off=0 size=1, R2")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void id_in_regsafe_bad_access(void)
+{
+ asm volatile (" \
+ /* r9 = ctx */ \
+ r9 = r1; \
+ /* r7 = ktime_get_ns() */ \
+ call %[bpf_ktime_get_ns]; \
+ r7 = r0; \
+ /* r6 = ktime_get_ns() */ \
+ call %[bpf_ktime_get_ns]; \
+ r6 = r0; \
+ /* r2 = ctx->data \
+ * r3 = ctx->data \
+ * r4 = ctx->data_end \
+ */ \
+ r2 = *(u32*)(r9 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r9 + %[__sk_buff_data]); \
+ r4 = *(u32*)(r9 + %[__sk_buff_data_end]); \
+ /* if r6 > 100 goto exit \
+ * if r7 > 100 goto exit \
+ */ \
+ if r6 > 100 goto l0_%=; \
+ if r7 > 100 goto l0_%=; \
+ /* r2 += r6 ; this forces assignment of ID to r2\
+ * r2 += 1 ; get some fixed off for r2\
+ * r3 += r7 ; this forces assignment of ID to r3\
+ * r3 += 1 ; get some fixed off for r3\
+ */ \
+ r2 += r6; \
+ r2 += 1; \
+ r3 += r7; \
+ r3 += 1; \
+ /* if r6 > r7 goto +1 ; no new information about the state is derived from\
+ * ; this check, thus produced verifier states differ\
+ * ; only in 'insn_idx' \
+ * r2 = r3 ; optionally share ID between r2 and r3\
+ */ \
+ if r6 != r7 goto l1_%=; \
+ r2 = r3; \
+l1_%=: /* if r3 > ctx->data_end goto exit */ \
+ if r3 > r4 goto l0_%=; \
+ /* r5 = *(u8 *) (r2 - 1) ; access packet memory using r2,\
+ * ; this is not always safe\
+ */ \
+ r5 = *(u8*)(r2 - 1); \
+l0_%=: /* exit(0) */ \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_direct_stack_access_wraparound.c b/tools/testing/selftests/bpf/progs/verifier_direct_stack_access_wraparound.c
new file mode 100644
index 000000000000..c538c6893552
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_direct_stack_access_wraparound.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
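+/* Each test adds two large constants to the frame pointer so that a 32-bit
+ * offset computation would wrap; the verifier must track the full 64-bit
+ * value and reject the out-of-range stack pointer arithmetic.
+ */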
+SEC("socket")
+__description("direct stack access with 32-bit wraparound. test1")
+__failure __msg("fp pointer and 2147483647")
+__failure_unpriv
+__naked void with_32_bit_wraparound_test1(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += 0x7fffffff; \
+ r1 += 0x7fffffff; \
+ w0 = 0; \
+ *(u8*)(r1 + 0) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("direct stack access with 32-bit wraparound. test2")
+__failure __msg("fp pointer and 1073741823")
+__failure_unpriv
+__naked void with_32_bit_wraparound_test2(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += 0x3fffffff; \
+ r1 += 0x3fffffff; \
+ w0 = 0; \
+ *(u8*)(r1 + 0) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("direct stack access with 32-bit wraparound. test3")
+__failure __msg("fp pointer offset 1073741822")
+__msg_unpriv("R1 stack pointer arithmetic goes out of range")
+__naked void with_32_bit_wraparound_test3(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += 0x1fffffff; \
+ r1 += 0x1fffffff; \
+ w0 = 0; \
+ *(u8*)(r1 + 0) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_div0.c b/tools/testing/selftests/bpf/progs/verifier_div0.c
new file mode 100644
index 000000000000..cca5ea18fc28
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_div0.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/div0.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
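+/* BPF defines division and modulo by zero: division by zero yields 0 and
+ * modulo by zero leaves the dividend unchanged, so none of these programs
+ * fault at runtime.
+ */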
+SEC("socket")
+__description("DIV32 by 0, zero check 1")
+__success __success_unpriv __retval(42)
+__naked void by_0_zero_check_1_1(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = 0; \
+ w2 = 1; \
+ w2 /= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("DIV32 by 0, zero check 2")
+__success __success_unpriv __retval(42)
+__naked void by_0_zero_check_2_1(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ r1 = 0xffffffff00000000LL ll; \
+ w2 = 1; \
+ w2 /= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("DIV64 by 0, zero check")
+__success __success_unpriv __retval(42)
+__naked void div64_by_0_zero_check(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = 0; \
+ w2 = 1; \
+ r2 /= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("MOD32 by 0, zero check 1")
+__success __success_unpriv __retval(42)
+__naked void by_0_zero_check_1_2(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = 0; \
+ w2 = 1; \
+ w2 %%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("MOD32 by 0, zero check 2")
+__success __success_unpriv __retval(42)
+__naked void by_0_zero_check_2_2(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ r1 = 0xffffffff00000000LL ll; \
+ w2 = 1; \
+ w2 %%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("MOD64 by 0, zero check")
+__success __success_unpriv __retval(42)
+__naked void mod64_by_0_zero_check(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = 0; \
+ w2 = 1; \
+ r2 %%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("DIV32 by 0, zero check ok, cls")
+__success __retval(8)
+__naked void _0_zero_check_ok_cls_1(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = 2; \
+ w2 = 16; \
+ w2 /= w1; \
+ r0 = r2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("DIV32 by 0, zero check 1, cls")
+__success __retval(0)
+__naked void _0_zero_check_1_cls_1(void)
+{
+ asm volatile (" \
+ w1 = 0; \
+ w0 = 1; \
+ w0 /= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("DIV32 by 0, zero check 2, cls")
+__success __retval(0)
+__naked void _0_zero_check_2_cls_1(void)
+{
+ asm volatile (" \
+ r1 = 0xffffffff00000000LL ll; \
+ w0 = 1; \
+ w0 /= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("DIV64 by 0, zero check, cls")
+__success __retval(0)
+__naked void by_0_zero_check_cls(void)
+{
+ asm volatile (" \
+ w1 = 0; \
+ w0 = 1; \
+ r0 /= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("MOD32 by 0, zero check ok, cls")
+__success __retval(2)
+__naked void _0_zero_check_ok_cls_2(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = 3; \
+ w2 = 5; \
+ w2 %%= w1; \
+ r0 = r2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("MOD32 by 0, zero check 1, cls")
+__success __retval(1)
+__naked void _0_zero_check_1_cls_2(void)
+{
+ asm volatile (" \
+ w1 = 0; \
+ w0 = 1; \
+ w0 %%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("MOD32 by 0, zero check 2, cls")
+__success __retval(1)
+__naked void _0_zero_check_2_cls_2(void)
+{
+ asm volatile (" \
+ r1 = 0xffffffff00000000LL ll; \
+ w0 = 1; \
+ w0 %%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("MOD64 by 0, zero check 1, cls")
+__success __retval(2)
+__naked void _0_zero_check_1_cls_3(void)
+{
+ asm volatile (" \
+ w1 = 0; \
+ w0 = 2; \
+ r0 %%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("MOD64 by 0, zero check 2, cls")
+__success __retval(-1)
+__naked void _0_zero_check_2_cls_3(void)
+{
+ asm volatile (" \
+ w1 = 0; \
+ w0 = -1; \
+ r0 %%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_div_overflow.c b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c
new file mode 100644
index 000000000000..34e0c012ee76
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/div_overflow.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <limits.h>
+#include "bpf_misc.h"
+
+/* Just make sure that JITs use udiv/umod, as otherwise we would get
+ * an exception from INT_MIN/-1 overflow, similarly to division by
+ * zero.
+ */
+
+SEC("tc")
+__description("DIV32 overflow, check 1")
+__success __retval(0)
+__naked void div32_overflow_check_1(void)
+{
+ asm volatile (" \
+ w1 = -1; \
+ w0 = %[int_min]; \
+ w0 /= w1; \
+ exit; \
+" :
+ : __imm_const(int_min, INT_MIN)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("DIV32 overflow, check 2")
+__success __retval(0)
+__naked void div32_overflow_check_2(void)
+{
+ asm volatile (" \
+ w0 = %[int_min]; \
+ w0 /= -1; \
+ exit; \
+" :
+ : __imm_const(int_min, INT_MIN)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("DIV64 overflow, check 1")
+__success __retval(0)
+__naked void div64_overflow_check_1(void)
+{
+ asm volatile (" \
+ r1 = -1; \
+ r2 = %[llong_min] ll; \
+ r2 /= r1; \
+ w0 = 0; \
+ if r0 == r2 goto l0_%=; \
+ w0 = 1; \
+l0_%=: exit; \
+" :
+ : __imm_const(llong_min, LLONG_MIN)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("DIV64 overflow, check 2")
+__success __retval(0)
+__naked void div64_overflow_check_2(void)
+{
+ asm volatile (" \
+ r1 = %[llong_min] ll; \
+ r1 /= -1; \
+ w0 = 0; \
+ if r0 == r1 goto l0_%=; \
+ w0 = 1; \
+l0_%=: exit; \
+" :
+ : __imm_const(llong_min, LLONG_MIN)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("MOD32 overflow, check 1")
+__success __retval(_INT_MIN)
+__naked void mod32_overflow_check_1(void)
+{
+ asm volatile (" \
+ w1 = -1; \
+ w0 = %[int_min]; \
+ w0 %%= w1; \
+ exit; \
+" :
+ : __imm_const(int_min, INT_MIN)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("MOD32 overflow, check 2")
+__success __retval(_INT_MIN)
+__naked void mod32_overflow_check_2(void)
+{
+ asm volatile (" \
+ w0 = %[int_min]; \
+ w0 %%= -1; \
+ exit; \
+" :
+ : __imm_const(int_min, INT_MIN)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("MOD64 overflow, check 1")
+__success __retval(1)
+__naked void mod64_overflow_check_1(void)
+{
+ asm volatile (" \
+ r1 = -1; \
+ r2 = %[llong_min] ll; \
+ r3 = r2; \
+ r2 %%= r1; \
+ w0 = 0; \
+ if r3 != r2 goto l0_%=; \
+ w0 = 1; \
+l0_%=: exit; \
+" :
+ : __imm_const(llong_min, LLONG_MIN)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("MOD64 overflow, check 2")
+__success __retval(1)
+__naked void mod64_overflow_check_2(void)
+{
+ asm volatile (" \
+ r2 = %[llong_min] ll; \
+ r3 = r2; \
+ r2 %%= -1; \
+ w0 = 0; \
+ if r3 != r2 goto l0_%=; \
+ w0 = 1; \
+l0_%=: exit; \
+" :
+ : __imm_const(llong_min, LLONG_MIN)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
new file mode 100644
index 000000000000..181da86ba5f0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+#include "xdp_metadata.h"
+#include "bpf_kfuncs.h"
+
+extern struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
+extern void bpf_task_release(struct task_struct *p) __ksym __weak;
+
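+/* A global subprog argument tagged __arg_trusted accepts trusted, RCU, or
+ * plain BTF pointers; adding __arg_nullable also permits NULL, which the
+ * subprog must then check before dereferencing.
+ */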
+__weak int subprog_trusted_task_nullable(struct task_struct *task __arg_trusted __arg_nullable)
+{
+ if (!task)
+ return 0;
+ return task->pid + task->tgid;
+}
+
+__weak int subprog_trusted_task_nullable_extra_layer(struct task_struct *task __arg_trusted __arg_nullable)
+{
+ return subprog_trusted_task_nullable(task) + subprog_trusted_task_nullable(NULL);
+}
+
+SEC("?tp_btf/task_newtask")
+__success __log_level(2)
+__msg("Validating subprog_trusted_task_nullable() func#1...")
+__msg(": R1=trusted_ptr_or_null_task_struct(")
+int trusted_task_arg_nullable(void *ctx)
+{
+ struct task_struct *t1 = bpf_get_current_task_btf();
+ struct task_struct *t2 = bpf_task_acquire(t1);
+ int res = 0;
+
+ /* known NULL */
+ res += subprog_trusted_task_nullable(NULL);
+
+ /* known non-NULL */
+ res += subprog_trusted_task_nullable(t1);
+ res += subprog_trusted_task_nullable_extra_layer(t1);
+
+ /* unknown if NULL or not */
+ res += subprog_trusted_task_nullable(t2);
+ res += subprog_trusted_task_nullable_extra_layer(t2);
+
+ if (t2) {
+ /* known non-NULL after explicit NULL check, just in case */
+ res += subprog_trusted_task_nullable(t2);
+ res += subprog_trusted_task_nullable_extra_layer(t2);
+
+ bpf_task_release(t2);
+ }
+
+ return res;
+}
+
+__weak int subprog_trusted_task_nonnull(struct task_struct *task __arg_trusted)
+{
+ return task->pid + task->tgid;
+}
+
+SEC("?kprobe")
+__failure __log_level(2)
+__msg("R1 type=scalar expected=ptr_, trusted_ptr_, rcu_ptr_")
+__msg("Caller passes invalid args into func#1 ('subprog_trusted_task_nonnull')")
+int trusted_task_arg_nonnull_fail1(void *ctx)
+{
+ return subprog_trusted_task_nonnull(NULL);
+}
+
+SEC("?tp_btf/task_newtask")
+__failure __log_level(2)
+__msg("R1 type=ptr_or_null_ expected=ptr_, trusted_ptr_, rcu_ptr_")
+__msg("Caller passes invalid args into func#1 ('subprog_trusted_task_nonnull')")
+int trusted_task_arg_nonnull_fail2(void *ctx)
+{
+ struct task_struct *t = bpf_get_current_task_btf();
+ struct task_struct *nullable;
+ int res;
+
+ nullable = bpf_task_acquire(t);
+
+ /* should fail, PTR_TO_BTF_ID_OR_NULL */
+ res = subprog_trusted_task_nonnull(nullable);
+
+ if (nullable)
+ bpf_task_release(nullable);
+
+ return res;
+}
+
+SEC("?kprobe")
+__success __log_level(2)
+__msg("Validating subprog_trusted_task_nonnull() func#1...")
+__msg(": R1=trusted_ptr_task_struct(")
+int trusted_task_arg_nonnull(void *ctx)
+{
+ struct task_struct *t = bpf_get_current_task_btf();
+
+ return subprog_trusted_task_nonnull(t);
+}
+
+struct task_struct___local {} __attribute__((preserve_access_index));
+
+__weak int subprog_nullable_task_flavor(
+ struct task_struct___local *task __arg_trusted __arg_nullable)
+{
+ char buf[16];
+
+ if (!task)
+ return 0;
+
+ return bpf_copy_from_user_task(&buf, sizeof(buf), NULL, (void *)task, 0);
+}
+
+SEC("?uprobe.s")
+__success __log_level(2)
+__msg("Validating subprog_nullable_task_flavor() func#1...")
+__msg(": R1=trusted_ptr_or_null_task_struct(")
+int flavor_ptr_nullable(void *ctx)
+{
+ struct task_struct___local *t = (void *)bpf_get_current_task_btf();
+
+ return subprog_nullable_task_flavor(t);
+}
+
+__weak int subprog_nonnull_task_flavor(struct task_struct___local *task __arg_trusted)
+{
+ char buf[16];
+
+ return bpf_copy_from_user_task(&buf, sizeof(buf), NULL, (void *)task, 0);
+}
+
+SEC("?uprobe.s")
+__success __log_level(2)
+__msg("Validating subprog_nonnull_task_flavor() func#1...")
+__msg(": R1=trusted_ptr_task_struct(")
+int flavor_ptr_nonnull(void *ctx)
+{
+ struct task_struct *t = bpf_get_current_task_btf();
+
+ return subprog_nonnull_task_flavor((void *)t);
+}
+
+__weak int subprog_trusted_destroy(struct task_struct *task __arg_trusted)
+{
+ bpf_task_release(task); /* should be rejected */
+
+ return 0;
+}
+
+SEC("?tp_btf/task_newtask")
+__failure __log_level(2)
+__msg("release kernel function bpf_task_release expects refcounted PTR_TO_BTF_ID")
+int BPF_PROG(trusted_destroy_fail, struct task_struct *task, u64 clone_flags)
+{
+ return subprog_trusted_destroy(task);
+}
+
+__weak int subprog_trusted_acq_rel(struct task_struct *task __arg_trusted)
+{
+ struct task_struct *owned;
+
+ owned = bpf_task_acquire(task);
+ if (!owned)
+ return 0;
+
+ bpf_task_release(owned); /* this one is OK, we acquired it locally */
+
+ return 0;
+}
+
+SEC("?tp_btf/task_newtask")
+__success __log_level(2)
+int BPF_PROG(trusted_acq_rel, struct task_struct *task, u64 clone_flags)
+{
+ return subprog_trusted_acq_rel(task);
+}
+
+__weak int subprog_untrusted_bad_tags(struct task_struct *task __arg_untrusted __arg_nullable)
+{
+ return task->pid;
+}
+
+SEC("tp_btf/sys_enter")
+__failure
+__msg("arg#0 untrusted cannot be combined with any other tags")
+int untrusted_bad_tags(void *ctx)
+{
+ return subprog_untrusted_bad_tags(0);
+}
+
+struct local_type_wont_be_accepted {};
+
+__weak int subprog_untrusted_bad_type(struct local_type_wont_be_accepted *p __arg_untrusted)
+{
+ return 0;
+}
+
+SEC("tp_btf/sys_enter")
+__failure
+__msg("arg#0 reference type('STRUCT local_type_wont_be_accepted') has no matches")
+int untrusted_bad_type(void *ctx)
+{
+ return subprog_untrusted_bad_type(bpf_rdonly_cast(0, 0));
+}
+
+__weak int subprog_untrusted(const volatile struct task_struct *restrict task __arg_untrusted)
+{
+ return task->pid;
+}
+
+SEC("tp_btf/sys_enter")
+__success
+__log_level(2)
+__msg("r1 = {{.*}}; {{.*}}R1_w=trusted_ptr_task_struct()")
+__msg("Func#1 ('subprog_untrusted') is global and assumed valid.")
+__msg("Validating subprog_untrusted() func#1...")
+__msg(": R1=untrusted_ptr_task_struct")
+int trusted_to_untrusted(void *ctx)
+{
+ return subprog_untrusted(bpf_get_current_task_btf());
+}
+
+char mem[16];
+u32 off;
+
+SEC("tp_btf/sys_enter")
+__success
+int anything_to_untrusted(void *ctx)
+{
+ /* untrusted to untrusted */
+ subprog_untrusted(bpf_core_cast(0, struct task_struct));
+ /* wrong type to untrusted */
+ subprog_untrusted((void *)bpf_core_cast(0, struct bpf_verifier_env));
+ /* map value to untrusted */
+ subprog_untrusted((void *)mem);
+ /* scalar to untrusted */
+ subprog_untrusted(0);
+ /* variable offset to untrusted (map) */
+ subprog_untrusted((void *)mem + off);
+ /* variable offset to untrusted (trusted) */
+ subprog_untrusted((void *)bpf_get_current_task_btf() + off);
+ return 0;
+}
+
+__weak int subprog_untrusted2(struct task_struct *task __arg_untrusted)
+{
+ return subprog_trusted_task_nullable(task);
+}
+
+SEC("tp_btf/sys_enter")
+__failure
+__msg("R1 type=untrusted_ptr_ expected=ptr_, trusted_ptr_, rcu_ptr_")
+__msg("Caller passes invalid args into func#{{.*}} ('subprog_trusted_task_nullable')")
+int untrusted_to_trusted(void *ctx)
+{
+ return subprog_untrusted2(bpf_get_current_task_btf());
+}
+
+__weak int subprog_void_untrusted(void *p __arg_untrusted)
+{
+ return *(int *)p;
+}
+
+__weak int subprog_char_untrusted(char *p __arg_untrusted)
+{
+ return *(int *)p;
+}
+
+__weak int subprog_enum_untrusted(enum bpf_attach_type *p __arg_untrusted)
+{
+ return *(int *)p;
+}
+
+SEC("tp_btf/sys_enter")
+__success
+__log_level(2)
+__msg("r1 = {{.*}}; {{.*}}R1_w=trusted_ptr_task_struct()")
+__msg("Func#1 ('subprog_void_untrusted') is global and assumed valid.")
+__msg("Validating subprog_void_untrusted() func#1...")
+__msg(": R1=rdonly_untrusted_mem(sz=0)")
+int trusted_to_untrusted_mem(void *ctx)
+{
+ return subprog_void_untrusted(bpf_get_current_task_btf());
+}
+
+SEC("tp_btf/sys_enter")
+__success
+int anything_to_untrusted_mem(void *ctx)
+{
+ /* untrusted to untrusted mem */
+ subprog_void_untrusted(bpf_core_cast(0, struct task_struct));
+ /* map value to untrusted mem */
+ subprog_void_untrusted(mem);
+ /* scalar to untrusted mem */
+ subprog_void_untrusted(0);
+ /* variable offset to untrusted mem (map) */
+ subprog_void_untrusted((void *)mem + off);
+ /* variable offset to untrusted mem (trusted) */
+ subprog_void_untrusted(bpf_get_current_task_btf() + off);
+ /* variable offset to untrusted char/enum (map) */
+ subprog_char_untrusted(mem + off);
+ subprog_enum_untrusted((void *)mem + off);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
new file mode 100644
index 000000000000..20904cd2baa2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "xdp_metadata.h"
+#include "bpf_kfuncs.h"
+#include "err.h"
+
+/* The compiler may be able to detect the access to uninitialized
+   memory in the routines performing out-of-bounds memory accesses and
+   emit warnings about it. This is the case with GCC. */
+#if !defined(__clang__)
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+
+int arr[1];
+int unkn_idx;
+const volatile bool call_dead_subprog = false;
+
+__noinline long global_bad(void)
+{
+ return arr[unkn_idx]; /* BOOM */
+}
+
+__noinline long global_good(void)
+{
+ return arr[0];
+}
+
+__noinline long global_calls_bad(void)
+{
+ return global_good() + global_bad() /* does BOOM indirectly */;
+}
+
+__noinline long global_calls_good_only(void)
+{
+ return global_good();
+}
+
+__noinline long global_dead(void)
+{
+ return arr[0] * 2;
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+/* main prog is validated completely first */
+__msg("('global_calls_good_only') is global and assumed valid.")
+/* eventually global_good() is transitively validated as well */
+__msg("Validating global_good() func")
+__msg("('global_good') is safe for any args that match its prototype")
+int chained_global_func_calls_success(void)
+{
+ int sum = 0;
+
+ if (call_dead_subprog)
+ sum += global_dead();
+ return global_calls_good_only() + sum;
+}
+
+SEC("?raw_tp")
+__failure __log_level(2)
+/* main prog validated successfully first */
+__msg("('global_calls_bad') is global and assumed valid.")
+/* eventually we validate global_bad() and fail */
+__msg("Validating global_bad() func")
+__msg("math between map_value pointer and register") /* BOOM */
+int chained_global_func_calls_bad(void)
+{
+ return global_calls_bad();
+}
+
+/* Do an out-of-bounds access, forcing the verifier to fail verification if
+ * this global func is called.
+ */
+__noinline int global_unsupp(const int *mem)
+{
+ if (!mem)
+ return 0;
+ return mem[100]; /* BOOM */
+}
+
+const volatile bool skip_unsupp_global = true;
+
+SEC("?raw_tp")
+__success
+int guarded_unsupp_global_called(void)
+{
+ if (!skip_unsupp_global)
+ return global_unsupp(NULL);
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __log_level(2)
+__msg("Func#1 ('global_unsupp') is global and assumed valid.")
+__msg("Validating global_unsupp() func#1...")
+__msg("value is outside of the allowed memory range")
+int unguarded_unsupp_global_called(void)
+{
+ int x = 0;
+
+ return global_unsupp(&x);
+}
+
+long stack[128];
+
+__weak int subprog_nullable_ptr_bad(int *p)
+{
+ return (*p) * 2; /* bad, missing null check */
+}
+
+SEC("?raw_tp")
+__failure __log_level(2)
+__msg("invalid mem access 'mem_or_null'")
+int arg_tag_nullable_ptr_fail(void *ctx)
+{
+ int x = 42;
+
+ return subprog_nullable_ptr_bad(&x);
+}
+
+typedef struct {
+ int x;
+} user_struct_t;
+
+__noinline __weak int subprog_user_anon_mem(user_struct_t *t)
+{
+ return t ? t->x : 0;
+}
+
+SEC("?tracepoint")
+__failure __log_level(2)
+__msg("invalid bpf_context access")
+__msg("Caller passes invalid args into func#1 ('subprog_user_anon_mem')")
+int anon_user_mem_invalid(void *ctx)
+{
+ /* can't pass PTR_TO_CTX as user memory */
+ return subprog_user_anon_mem(ctx);
+}
+
+SEC("?tracepoint")
+__success __log_level(2)
+__msg("Func#1 ('subprog_user_anon_mem') is safe for any args that match its prototype")
+int anon_user_mem_valid(void *ctx)
+{
+ user_struct_t t = { .x = 42 };
+
+ return subprog_user_anon_mem(&t);
+}
+
+__noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull)
+{
+ return (*p1) * (*p2); /* good, no need for NULL checks */
+}
+
+int x = 47;
+
+SEC("?raw_tp")
+__success __log_level(2)
+int arg_tag_nonnull_ptr_good(void *ctx)
+{
+ int y = 74;
+
+ return subprog_nonnull_ptr_good(&x, &y);
+}
+
+/* this global subprog can now be called from many types of entry progs, each
+ * with a different context type
+ */
+__weak int subprog_ctx_tag(void *ctx __arg_ctx)
+{
+ return bpf_get_stack(ctx, stack, sizeof(stack), 0);
+}
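+
+/* Roughly: the __arg_ctx tag tells the verifier to treat this void * argument
+ * as the calling program's context (PTR_TO_CTX), which is what allows a
+ * ctx-expecting helper like bpf_get_stack() to be used here even though the
+ * subprog itself is program-type agnostic.
+ */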
+
+__weak int raw_tp_canonical(struct bpf_raw_tracepoint_args *ctx __arg_ctx)
+{
+ return 0;
+}
+
+__weak int raw_tp_u64_array(u64 *ctx __arg_ctx)
+{
+ return 0;
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+int arg_tag_ctx_raw_tp(void *ctx)
+{
+ return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
+}
+
+SEC("?raw_tp.w")
+__success __log_level(2)
+int arg_tag_ctx_raw_tp_writable(void *ctx)
+{
+ return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
+}
+
+SEC("?tp_btf/sys_enter")
+__success __log_level(2)
+int arg_tag_ctx_raw_tp_btf(void *ctx)
+{
+ return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
+}
+
+struct whatever { };
+
+__weak int tp_whatever(struct whatever *ctx __arg_ctx)
+{
+ return 0;
+}
+
+SEC("?tp")
+__success __log_level(2)
+int arg_tag_ctx_tp(void *ctx)
+{
+ return subprog_ctx_tag(ctx) + tp_whatever(ctx);
+}
+
+__weak int kprobe_subprog_pt_regs(struct pt_regs *ctx __arg_ctx)
+{
+ return 0;
+}
+
+__weak int kprobe_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
+{
+ return 0;
+}
+
+SEC("?kprobe")
+__success __log_level(2)
+int arg_tag_ctx_kprobe(void *ctx)
+{
+ return subprog_ctx_tag(ctx) +
+ kprobe_subprog_pt_regs(ctx) +
+ kprobe_subprog_typedef(ctx);
+}
+
+__weak int perf_subprog_regs(
+#if defined(bpf_target_riscv)
+ struct user_regs_struct *ctx __arg_ctx
+#elif defined(bpf_target_s390)
+ /* user_pt_regs typedef is anonymous struct, so only `void *` works */
+ void *ctx __arg_ctx
+#elif defined(bpf_target_loongarch) || defined(bpf_target_arm64) || defined(bpf_target_powerpc)
+ struct user_pt_regs *ctx __arg_ctx
+#else
+ struct pt_regs *ctx __arg_ctx
+#endif
+)
+{
+ return 0;
+}
+
+__weak int perf_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
+{
+ return 0;
+}
+
+__weak int perf_subprog_canonical(struct bpf_perf_event_data *ctx __arg_ctx)
+{
+ return 0;
+}
+
+SEC("?perf_event")
+__success __log_level(2)
+int arg_tag_ctx_perf(void *ctx)
+{
+ return subprog_ctx_tag(ctx) +
+ perf_subprog_regs(ctx) +
+ perf_subprog_typedef(ctx) +
+ perf_subprog_canonical(ctx);
+}
+
+__weak int iter_subprog_void(void *ctx __arg_ctx)
+{
+ return 0;
+}
+
+__weak int iter_subprog_typed(struct bpf_iter__task *ctx __arg_ctx)
+{
+ return 0;
+}
+
+SEC("?iter/task")
+__success __log_level(2)
+int arg_tag_ctx_iter_task(struct bpf_iter__task *ctx)
+{
+ return (iter_subprog_void(ctx) + iter_subprog_typed(ctx)) & 1;
+}
+
+__weak int tracing_subprog_void(void *ctx __arg_ctx)
+{
+ return 0;
+}
+
+__weak int tracing_subprog_u64(u64 *ctx __arg_ctx)
+{
+ return 0;
+}
+
+int acc;
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+__success __log_level(2)
+int BPF_PROG(arg_tag_ctx_fentry)
+{
+ acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
+ return 0;
+}
+
+SEC("?fexit/" SYS_PREFIX "sys_nanosleep")
+__success __log_level(2)
+int BPF_PROG(arg_tag_ctx_fexit)
+{
+ acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
+ return 0;
+}
+
+SEC("?fmod_ret/" SYS_PREFIX "sys_nanosleep")
+__success __log_level(2)
+int BPF_PROG(arg_tag_ctx_fmod_ret)
+{
+ return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
+}
+
+SEC("?lsm/bpf")
+__success __log_level(2)
+int BPF_PROG(arg_tag_ctx_lsm)
+{
+ int ret;
+
+ ret = tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
+ set_if_not_errno_or_zero(ret, -1);
+ return ret;
+}
+
+SEC("?struct_ops/test_1")
+__success __log_level(2)
+int BPF_PROG(arg_tag_ctx_struct_ops)
+{
+ return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
+}
+
+SEC(".struct_ops")
+struct bpf_dummy_ops dummy_1 = {
+ .test_1 = (void *)arg_tag_ctx_struct_ops,
+};
+
+SEC("?syscall")
+__success __log_level(2)
+int arg_tag_ctx_syscall(void *ctx)
+{
+ return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx) + tp_whatever(ctx);
+}
+
+__weak int subprog_dynptr(struct bpf_dynptr *dptr)
+{
+ long *d, t, buf[1] = {};
+
+ d = bpf_dynptr_data(dptr, 0, sizeof(long));
+ if (!d)
+ return 0;
+
+ t = *d + 1;
+
+ d = bpf_dynptr_slice(dptr, 0, &buf, sizeof(long));
+ if (!d)
+ return t;
+
+ t = *d + 2;
+
+ return t;
+}
+
+SEC("?xdp")
+__success __log_level(2)
+int arg_tag_dynptr(struct xdp_md *ctx)
+{
+ struct bpf_dynptr dptr;
+
+ bpf_dynptr_from_xdp(ctx, 0, &dptr);
+
+ return subprog_dynptr(&dptr);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_gotol.c b/tools/testing/selftests/bpf/progs/verifier_gotol.c
new file mode 100644
index 000000000000..d5d8f24df394
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_gotol.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#ifdef CAN_USE_GOTOL
+
+SEC("socket")
+__description("gotol, small_imm")
+__success __success_unpriv __retval(1)
+__naked void gotol_small_imm(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 == 0 goto l0_%=; \
+ gotol l1_%=; \
+l2_%=: \
+ gotol l3_%=; \
+l1_%=: \
+ r0 = 1; \
+ gotol l2_%=; \
+l0_%=: \
+ r0 = 2; \
+l3_%=: \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("gotol, large_imm")
+__success __failure_unpriv __retval(40000)
+__naked void gotol_large_imm(void)
+{
+ asm volatile (" \
+ gotol 1f; \
+0: \
+ r0 = 0; \
+ .rept 40000; \
+ r0 += 1; \
+ .endr; \
+ exit; \
+1: gotol 0b; \
+" :
+ :
+ : __clobber_all);
+}
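+
+/* The 40000-instruction .rept body above is what makes gotol necessary: a
+ * plain goto encodes a signed 16-bit instruction offset (at most ~32k insns),
+ * while gotol (cpu v4) carries a 32-bit offset and can jump across the whole
+ * block.
+ */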
+
+#else
+
+SEC("socket")
+__description("cpuv4 is not supported by compiler or jit, use a dummy test")
+__success
+int dummy_test(void)
+{
+ return 0;
+}
+
+#endif
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c b/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c
new file mode 100644
index 000000000000..f2c54e4d89eb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c
@@ -0,0 +1,825 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/helper_access_var_len.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct test_val);
+} map_hash_48b SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096);
+} map_ringbuf SEC(".maps");
+
+SEC("tracepoint")
+__description("helper access to variable memory: stack, bitwise AND + JMP, correct bounds")
+__success
+__naked void bitwise_and_jmp_correct_bounds(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -64; \
+ r0 = 0; \
+ *(u64*)(r10 - 64) = r0; \
+ *(u64*)(r10 - 56) = r0; \
+ *(u64*)(r10 - 48) = r0; \
+ *(u64*)(r10 - 40) = r0; \
+ *(u64*)(r10 - 32) = r0; \
+ *(u64*)(r10 - 24) = r0; \
+ *(u64*)(r10 - 16) = r0; \
+ *(u64*)(r10 - 8) = r0; \
+ r2 = 16; \
+ *(u64*)(r1 - 128) = r2; \
+ r2 = *(u64*)(r1 - 128); \
+ r2 &= 64; \
+ r4 = 0; \
+ if r4 >= r2 goto l0_%=; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
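+
+/* The pattern above is how a variable size is made acceptable to the helper:
+ * "r2 &= 64" gives the verifier an upper bound of 64, and the "r4 >= r2"
+ * branch (with r4 = 0) rules out the zero case on the fallthrough path, so
+ * bpf_probe_read_kernel() sees a size in [1, 64] targeting the 64-byte stack
+ * buffer at r10-64.
+ */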
+
+SEC("socket")
+__description("helper access to variable memory: stack, bitwise AND, zero included")
+/* in privileged mode reads from uninitialized stack locations are permitted */
+__success __failure_unpriv
+__msg_unpriv("invalid read from stack R2 off -64+0 size 64")
+__retval(0)
+__naked void stack_bitwise_and_zero_included(void)
+{
+ asm volatile (" \
+ /* set max stack size */ \
+ r6 = 0; \
+ *(u64*)(r10 - 128) = r6; \
+ /* set r3 to a random value */ \
+ call %[bpf_get_prandom_u32]; \
+ r3 = r0; \
+ /* use bitwise AND to limit r3 range to [0, 64] */\
+ r3 &= 64; \
+ r1 = %[map_ringbuf] ll; \
+ r2 = r10; \
+ r2 += -64; \
+ r4 = 0; \
+ /* Call bpf_ringbuf_output(), it is one of a few helper functions with\
+ * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\
+ * For unpriv this should signal an error, because memory at &fp[-64] is\
+ * not initialized. \
+ */ \
+ call %[bpf_ringbuf_output]; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_ringbuf_output),
+ __imm_addr(map_ringbuf)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: stack, bitwise AND + JMP, wrong max")
+__failure __msg("invalid write to stack R1 off=-64 size=65")
+__naked void bitwise_and_jmp_wrong_max(void)
+{
+ asm volatile (" \
+ r2 = *(u64*)(r1 + 8); \
+ r1 = r10; \
+ r1 += -64; \
+ *(u64*)(r1 - 128) = r2; \
+ r2 = *(u64*)(r1 - 128); \
+ r2 &= 65; \
+ r4 = 0; \
+ if r4 >= r2 goto l0_%=; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: stack, JMP, correct bounds")
+__success
+__naked void memory_stack_jmp_correct_bounds(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -64; \
+ r0 = 0; \
+ *(u64*)(r10 - 64) = r0; \
+ *(u64*)(r10 - 56) = r0; \
+ *(u64*)(r10 - 48) = r0; \
+ *(u64*)(r10 - 40) = r0; \
+ *(u64*)(r10 - 32) = r0; \
+ *(u64*)(r10 - 24) = r0; \
+ *(u64*)(r10 - 16) = r0; \
+ *(u64*)(r10 - 8) = r0; \
+ r2 = 16; \
+ *(u64*)(r1 - 128) = r2; \
+ r2 = *(u64*)(r1 - 128); \
+ if r2 > 64 goto l0_%=; \
+ r4 = 0; \
+ if r4 >= r2 goto l0_%=; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: stack, JMP (signed), correct bounds")
+__success
+__naked void stack_jmp_signed_correct_bounds(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -64; \
+ r0 = 0; \
+ *(u64*)(r10 - 64) = r0; \
+ *(u64*)(r10 - 56) = r0; \
+ *(u64*)(r10 - 48) = r0; \
+ *(u64*)(r10 - 40) = r0; \
+ *(u64*)(r10 - 32) = r0; \
+ *(u64*)(r10 - 24) = r0; \
+ *(u64*)(r10 - 16) = r0; \
+ *(u64*)(r10 - 8) = r0; \
+ r2 = 16; \
+ *(u64*)(r1 - 128) = r2; \
+ r2 = *(u64*)(r1 - 128); \
+ if r2 s> 64 goto l0_%=; \
+ r4 = 0; \
+ if r4 s>= r2 goto l0_%=; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: stack, JMP, bounds + offset")
+__failure __msg("invalid write to stack R1 off=-64 size=65")
+__naked void memory_stack_jmp_bounds_offset(void)
+{
+ asm volatile (" \
+ r2 = *(u64*)(r1 + 8); \
+ r1 = r10; \
+ r1 += -64; \
+ *(u64*)(r1 - 128) = r2; \
+ r2 = *(u64*)(r1 - 128); \
+ if r2 > 64 goto l0_%=; \
+ r4 = 0; \
+ if r4 >= r2 goto l0_%=; \
+ r2 += 1; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: stack, JMP, wrong max")
+__failure __msg("invalid write to stack R1 off=-64 size=65")
+__naked void memory_stack_jmp_wrong_max(void)
+{
+ asm volatile (" \
+ r2 = *(u64*)(r1 + 8); \
+ r1 = r10; \
+ r1 += -64; \
+ *(u64*)(r1 - 128) = r2; \
+ r2 = *(u64*)(r1 - 128); \
+ if r2 > 65 goto l0_%=; \
+ r4 = 0; \
+ if r4 >= r2 goto l0_%=; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: stack, JMP, no max check")
+__failure
+/* because max wasn't checked, signed min is negative */
+__msg("R2 min value is negative, either use unsigned or 'var &= const'")
+__naked void stack_jmp_no_max_check(void)
+{
+ asm volatile (" \
+ r2 = *(u64*)(r1 + 8); \
+ r1 = r10; \
+ r1 += -64; \
+ *(u64*)(r1 - 128) = r2; \
+ r2 = *(u64*)(r1 - 128); \
+ r4 = 0; \
+ if r4 >= r2 goto l0_%=; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("helper access to variable memory: stack, JMP, no min check")
+/* in privileged mode reads from uninitialized stack locations are permitted */
+__success __failure_unpriv
+__msg_unpriv("invalid read from stack R2 off -64+0 size 64")
+__retval(0)
+__naked void stack_jmp_no_min_check(void)
+{
+ asm volatile (" \
+ /* set max stack size */ \
+ r6 = 0; \
+ *(u64*)(r10 - 128) = r6; \
+ /* set r3 to a random value */ \
+ call %[bpf_get_prandom_u32]; \
+ r3 = r0; \
+ /* use JMP to limit r3 range to [0, 64] */ \
+ if r3 > 64 goto l0_%=; \
+ r1 = %[map_ringbuf] ll; \
+ r2 = r10; \
+ r2 += -64; \
+ r4 = 0; \
+ /* Call bpf_ringbuf_output(), it is one of a few helper functions with\
+ * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\
+ * For unpriv this should signal an error, because memory at &fp[-64] is\
+ * not initialized. \
+ */ \
+ call %[bpf_ringbuf_output]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_ringbuf_output),
+ __imm_addr(map_ringbuf)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: stack, JMP (signed), no min check")
+__failure __msg("R2 min value is negative")
+__naked void jmp_signed_no_min_check(void)
+{
+ asm volatile (" \
+ r2 = *(u64*)(r1 + 8); \
+ r1 = r10; \
+ r1 += -64; \
+ *(u64*)(r1 - 128) = r2; \
+ r2 = *(u64*)(r1 - 128); \
+ if r2 s> 64 goto l0_%=; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+ r0 = 0; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: map, JMP, correct bounds")
+__success
+__naked void memory_map_jmp_correct_bounds(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = %[sizeof_test_val]; \
+ *(u64*)(r10 - 128) = r2; \
+ r2 = *(u64*)(r10 - 128); \
+ if r2 s> %[sizeof_test_val] goto l1_%=; \
+ r4 = 0; \
+ if r4 s>= r2 goto l1_%=; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l1_%=: r0 = 0; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(sizeof_test_val, sizeof(struct test_val))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: map, JMP, wrong max")
+__failure __msg("invalid access to map value, value_size=48 off=0 size=49")
+__naked void memory_map_jmp_wrong_max(void)
+{
+ asm volatile (" \
+ r6 = *(u64*)(r1 + 8); \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = r6; \
+ *(u64*)(r10 - 128) = r2; \
+ r2 = *(u64*)(r10 - 128); \
+ if r2 s> %[__imm_0] goto l1_%=; \
+ r4 = 0; \
+ if r4 s>= r2 goto l1_%=; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l1_%=: r0 = 0; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(__imm_0, sizeof(struct test_val) + 1)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: map adjusted, JMP, correct bounds")
+__success
+__naked void map_adjusted_jmp_correct_bounds(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r1 += 20; \
+ r2 = %[sizeof_test_val]; \
+ *(u64*)(r10 - 128) = r2; \
+ r2 = *(u64*)(r10 - 128); \
+ if r2 s> %[__imm_0] goto l1_%=; \
+ r4 = 0; \
+ if r4 s>= r2 goto l1_%=; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l1_%=: r0 = 0; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(__imm_0, sizeof(struct test_val) - 20),
+ __imm_const(sizeof_test_val, sizeof(struct test_val))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: map adjusted, JMP, wrong max")
+__failure __msg("R1 min value is outside of the allowed memory range")
+__naked void map_adjusted_jmp_wrong_max(void)
+{
+ asm volatile (" \
+ r6 = *(u64*)(r1 + 8); \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r1 += 20; \
+ r2 = r6; \
+ *(u64*)(r10 - 128) = r2; \
+ r2 = *(u64*)(r10 - 128); \
+ if r2 s> %[__imm_0] goto l1_%=; \
+ r4 = 0; \
+ if r4 s>= r2 goto l1_%=; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l1_%=: r0 = 0; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(__imm_0, sizeof(struct test_val) - 19)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)")
+__success __retval(0)
+__naked void ptr_to_mem_or_null_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ r2 = 0; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+ exit; \
+" :
+ : __imm(bpf_csum_diff)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)")
+__failure __msg("R1 type=scalar expected=fp")
+__naked void ptr_to_mem_or_null_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + 0); \
+ r1 = 0; \
+ *(u64*)(r10 - 128) = r2; \
+ r2 = *(u64*)(r10 - 128); \
+ r2 &= 64; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+ exit; \
+" :
+ : __imm(bpf_csum_diff)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)")
+__success __retval(0)
+__naked void ptr_to_mem_or_null_3(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -8; \
+ r2 = 0; \
+ *(u64*)(r1 + 0) = r2; \
+ r2 &= 8; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+ exit; \
+" :
+ : __imm(bpf_csum_diff)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)")
+__success __retval(0)
+__naked void ptr_to_mem_or_null_4(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = 0; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_csum_diff),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)")
+__success __retval(0)
+__naked void ptr_to_mem_or_null_5(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r2 = *(u64*)(r0 + 0); \
+ if r2 > 8 goto l0_%=; \
+ r1 = r10; \
+ r1 += -8; \
+ *(u64*)(r1 + 0) = r2; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_csum_diff),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)")
+__success __retval(0)
+__naked void ptr_to_mem_or_null_6(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = *(u64*)(r0 + 0); \
+ if r2 > 8 goto l0_%=; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_csum_diff),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)")
+__success __retval(0)
+/* csum_diff of 64-byte packet */
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void ptr_to_mem_or_null_7(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r6; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r1 = r6; \
+ r2 = *(u64*)(r6 + 0); \
+ if r2 > 8 goto l0_%=; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_csum_diff),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)")
+__failure __msg("R1 type=scalar expected=fp")
+__naked void ptr_to_mem_or_null_8(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ r2 = 0; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)")
+__failure __msg("R1 type=scalar expected=fp")
+__naked void ptr_to_mem_or_null_9(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ r2 = 1; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)")
+__success
+__naked void ptr_to_mem_or_null_10(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -8; \
+ r2 = 0; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)")
+__success
+__naked void ptr_to_mem_or_null_11(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = 0; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)")
+__success
+__naked void ptr_to_mem_or_null_12(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r2 = *(u64*)(r0 + 0); \
+ if r2 > 8 goto l0_%=; \
+ r1 = r10; \
+ r1 += -8; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)")
+__success
+__naked void ptr_to_mem_or_null_13(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = *(u64*)(r0 + 0); \
+ if r2 > 8 goto l0_%=; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("helper access to variable memory: 8 bytes leak")
+/* in privileged mode reads from uninitialized stack locations are permitted */
+__success __failure_unpriv
+__msg_unpriv("invalid read from stack R2 off -64+32 size 64")
+__retval(0)
+__naked void variable_memory_8_bytes_leak(void)
+{
+ asm volatile (" \
+ /* set max stack size */ \
+ r6 = 0; \
+ *(u64*)(r10 - 128) = r6; \
+ /* set r3 to a random value */ \
+ call %[bpf_get_prandom_u32]; \
+ r3 = r0; \
+ r1 = %[map_ringbuf] ll; \
+ r2 = r10; \
+ r2 += -64; \
+ r0 = 0; \
+ *(u64*)(r10 - 64) = r0; \
+ *(u64*)(r10 - 56) = r0; \
+ *(u64*)(r10 - 48) = r0; \
+ *(u64*)(r10 - 40) = r0; \
+ /* Note: fp[-32] left uninitialized */ \
+ *(u64*)(r10 - 24) = r0; \
+ *(u64*)(r10 - 16) = r0; \
+ *(u64*)(r10 - 8) = r0; \
+ /* Limit r3 range to [1, 64] */ \
+ r3 &= 63; \
+ r3 += 1; \
+ r4 = 0; \
+ /* Call bpf_ringbuf_output(), it is one of a few helper functions with\
+ * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\
+ * For unpriv this should signal an error, because memory region [1, 64]\
+ * at &fp[-64] is not fully initialized. \
+ */ \
+ call %[bpf_ringbuf_output]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_ringbuf_output),
+ __imm_addr(map_ringbuf)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to variable memory: 8 bytes no leak (init memory)")
+__success
+__naked void bytes_no_leak_init_memory(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r0 = 0; \
+ r0 = 0; \
+ *(u64*)(r10 - 64) = r0; \
+ *(u64*)(r10 - 56) = r0; \
+ *(u64*)(r10 - 48) = r0; \
+ *(u64*)(r10 - 40) = r0; \
+ *(u64*)(r10 - 32) = r0; \
+ *(u64*)(r10 - 24) = r0; \
+ *(u64*)(r10 - 16) = r0; \
+ *(u64*)(r10 - 8) = r0; \
+ r1 += -64; \
+ r2 = 0; \
+ r2 &= 32; \
+ r2 += 32; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+ r1 = *(u64*)(r10 - 16); \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c
new file mode 100644
index 000000000000..74f5f9cd153d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/helper_packet_access.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
+SEC("xdp")
+__description("helper access to packet: test1, valid packet_ptr range")
+__success __retval(0)
+__naked void test1_valid_packet_ptr_range(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 > r3 goto l0_%=; \
+ r1 = %[map_hash_8b] ll; \
+ r3 = r2; \
+ r4 = 0; \
+ call %[bpf_map_update_elem]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_update_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
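+
+/* This is the canonical packet bounds check: comparing data + 8 (r1) against
+ * data_end (r3) proves 8 readable bytes at r2 on the fallthrough path, which
+ * is what allows r2, a packet pointer, to be passed as the key (and value) to
+ * bpf_map_update_elem().
+ */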
+
+SEC("xdp")
+__description("helper access to packet: test2, unchecked packet_ptr")
+__failure __msg("invalid access to packet")
+__naked void packet_test2_unchecked_packet_ptr(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(xdp_md_data, offsetof(struct xdp_md, data))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("helper access to packet: test3, variable add")
+__success __retval(0)
+__naked void to_packet_test3_variable_add(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r4 = r2; \
+ r4 += 8; \
+ if r4 > r3 goto l0_%=; \
+ r5 = *(u8*)(r2 + 0); \
+ r4 = r2; \
+ r4 += r5; \
+ r5 = r4; \
+ r5 += 8; \
+ if r5 > r3 goto l0_%=; \
+ r1 = %[map_hash_8b] ll; \
+ r2 = r4; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("helper access to packet: test4, packet_ptr with bad range")
+__failure __msg("invalid access to packet")
+__naked void packet_ptr_with_bad_range_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r4 = r2; \
+ r4 += 4; \
+ if r4 > r3 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("helper access to packet: test5, packet_ptr with too short range")
+__failure __msg("invalid access to packet")
+__naked void ptr_with_too_short_range_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r2 += 1; \
+ r4 = r2; \
+ r4 += 7; \
+ if r4 > r3 goto l0_%=; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test6, cls valid packet_ptr range")
+__success __retval(0)
+__naked void cls_valid_packet_ptr_range(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 > r3 goto l0_%=; \
+ r1 = %[map_hash_8b] ll; \
+ r3 = r2; \
+ r4 = 0; \
+ call %[bpf_map_update_elem]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_update_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test7, cls unchecked packet_ptr")
+__failure __msg("invalid access to packet")
+__naked void test7_cls_unchecked_packet_ptr(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test8, cls variable add")
+__success __retval(0)
+__naked void packet_test8_cls_variable_add(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r4 = r2; \
+ r4 += 8; \
+ if r4 > r3 goto l0_%=; \
+ r5 = *(u8*)(r2 + 0); \
+ r4 = r2; \
+ r4 += r5; \
+ r5 = r4; \
+ r5 += 8; \
+ if r5 > r3 goto l0_%=; \
+ r1 = %[map_hash_8b] ll; \
+ r2 = r4; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test9, cls packet_ptr with bad range")
+__failure __msg("invalid access to packet")
+__naked void packet_ptr_with_bad_range_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r4 = r2; \
+ r4 += 4; \
+ if r4 > r3 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test10, cls packet_ptr with too short range")
+__failure __msg("invalid access to packet")
+__naked void ptr_with_too_short_range_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r2 += 1; \
+ r4 = r2; \
+ r4 += 7; \
+ if r4 > r3 goto l0_%=; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test11, cls unsuitable helper 1")
+__failure __msg("helper access to the packet")
+__naked void test11_cls_unsuitable_helper_1(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r6 += 1; \
+ r3 = r6; \
+ r3 += 7; \
+ if r3 > r7 goto l0_%=; \
+ r2 = 0; \
+ r4 = 42; \
+ r5 = 0; \
+ call %[bpf_skb_store_bytes]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_skb_store_bytes),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test12, cls unsuitable helper 2")
+__failure __msg("helper access to the packet")
+__naked void test12_cls_unsuitable_helper_2(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r3 = r6; \
+ r6 += 8; \
+ if r6 > r7 goto l0_%=; \
+ r2 = 0; \
+ r4 = 4; \
+ call %[bpf_skb_load_bytes]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test13, cls helper ok")
+__success __retval(0)
+__naked void packet_test13_cls_helper_ok(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r6 += 1; \
+ r1 = r6; \
+ r1 += 7; \
+ if r1 > r7 goto l0_%=; \
+ r1 = r6; \
+ r2 = 4; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_csum_diff),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test14, cls helper ok sub")
+__success __retval(0)
+__naked void test14_cls_helper_ok_sub(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r6 += 1; \
+ r1 = r6; \
+ r1 += 7; \
+ if r1 > r7 goto l0_%=; \
+ r1 -= 4; \
+ r2 = 4; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_csum_diff),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test15, cls helper fail sub")
+__failure __msg("invalid access to packet")
+__naked void test15_cls_helper_fail_sub(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r6 += 1; \
+ r1 = r6; \
+ r1 += 7; \
+ if r1 > r7 goto l0_%=; \
+ r1 -= 12; \
+ r2 = 4; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_csum_diff),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test16, cls helper fail range 1")
+__failure __msg("invalid access to packet")
+__naked void cls_helper_fail_range_1(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r6 += 1; \
+ r1 = r6; \
+ r1 += 7; \
+ if r1 > r7 goto l0_%=; \
+ r1 = r6; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_csum_diff),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test17, cls helper fail range 2")
+__failure __msg("R2 min value is negative")
+__naked void cls_helper_fail_range_2(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r6 += 1; \
+ r1 = r6; \
+ r1 += 7; \
+ if r1 > r7 goto l0_%=; \
+ r1 = r6; \
+ r2 = -9; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_csum_diff),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test18, cls helper fail range 3")
+__failure __msg("R2 min value is negative")
+__naked void cls_helper_fail_range_3(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r6 += 1; \
+ r1 = r6; \
+ r1 += 7; \
+ if r1 > r7 goto l0_%=; \
+ r1 = r6; \
+ r2 = %[__imm_0]; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_csum_diff),
+ __imm_const(__imm_0, ~0),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test19, cls helper range zero")
+__success __retval(0)
+__naked void test19_cls_helper_range_zero(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r6 += 1; \
+ r1 = r6; \
+ r1 += 7; \
+ if r1 > r7 goto l0_%=; \
+ r1 = r6; \
+ r2 = 0; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_csum_diff),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test20, pkt end as input")
+__failure __msg("R1 type=pkt_end expected=fp")
+__naked void test20_pkt_end_as_input(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r6 += 1; \
+ r1 = r6; \
+ r1 += 7; \
+ if r1 > r7 goto l0_%=; \
+ r1 = r7; \
+ r2 = 4; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_csum_diff),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("helper access to packet: test21, wrong reg")
+__failure __msg("invalid access to packet")
+__naked void to_packet_test21_wrong_reg(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r6 += 1; \
+ r1 = r6; \
+ r1 += 7; \
+ if r1 > r7 goto l0_%=; \
+ r2 = 4; \
+ r3 = 0; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_csum_diff]; \
+ r0 = 0; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_csum_diff),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_restricted.c b/tools/testing/selftests/bpf/progs/verifier_helper_restricted.c
new file mode 100644
index 000000000000..059aa716e3d0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_helper_restricted.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/helper_restricted.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct val {
+ int cnt;
+ struct bpf_spin_lock l;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct val);
+} map_spin_lock SEC(".maps");
+
+struct timer {
+ struct bpf_timer t;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct timer);
+} map_timer SEC(".maps");
+
+SEC("kprobe")
+__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_KPROBE")
+__failure __msg("program of this type cannot use helper bpf_ktime_get_coarse_ns")
+__naked void in_bpf_prog_type_kprobe_1(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_coarse_ns]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_coarse_ns)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_TRACEPOINT")
+__failure __msg("program of this type cannot use helper bpf_ktime_get_coarse_ns")
+__naked void in_bpf_prog_type_tracepoint_1(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_coarse_ns]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_coarse_ns)
+ : __clobber_all);
+}
+
+SEC("perf_event")
+__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_PERF_EVENT")
+__failure __msg("program of this type cannot use helper bpf_ktime_get_coarse_ns")
+__naked void bpf_prog_type_perf_event_1(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_coarse_ns]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_coarse_ns)
+ : __clobber_all);
+}
+
+SEC("raw_tracepoint")
+__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT")
+__failure __msg("program of this type cannot use helper bpf_ktime_get_coarse_ns")
+__naked void bpf_prog_type_raw_tracepoint_1(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_coarse_ns]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_coarse_ns)
+ : __clobber_all);
+}
+
+SEC("kprobe")
+__description("bpf_timer_init isn restricted in BPF_PROG_TYPE_KPROBE")
+__failure __msg("tracing progs cannot use bpf_timer yet")
+__naked void in_bpf_prog_type_kprobe_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_timer] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = %[map_timer] ll; \
+ r3 = 1; \
+l0_%=: call %[bpf_timer_init]; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_timer_init),
+ __imm_addr(map_timer)
+ : __clobber_all);
+}
+
+SEC("perf_event")
+__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_PERF_EVENT")
+__failure __msg("tracing progs cannot use bpf_timer yet")
+__naked void bpf_prog_type_perf_event_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_timer] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = %[map_timer] ll; \
+ r3 = 1; \
+l0_%=: call %[bpf_timer_init]; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_timer_init),
+ __imm_addr(map_timer)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_TRACEPOINT")
+__failure __msg("tracing progs cannot use bpf_timer yet")
+__naked void in_bpf_prog_type_tracepoint_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_timer] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = %[map_timer] ll; \
+ r3 = 1; \
+l0_%=: call %[bpf_timer_init]; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_timer_init),
+ __imm_addr(map_timer)
+ : __clobber_all);
+}
+
+SEC("raw_tracepoint")
+__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT")
+__failure __msg("tracing progs cannot use bpf_timer yet")
+__naked void bpf_prog_type_raw_tracepoint_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_timer] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = %[map_timer] ll; \
+ r3 = 1; \
+l0_%=: call %[bpf_timer_init]; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_timer_init),
+ __imm_addr(map_timer)
+ : __clobber_all);
+}
+
+SEC("kprobe")
+__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_KPROBE")
+__failure __msg("tracing progs cannot use bpf_spin_lock yet")
+__naked void in_bpf_prog_type_kprobe_3(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ call %[bpf_spin_lock]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_TRACEPOINT")
+__failure __msg("tracing progs cannot use bpf_spin_lock yet")
+__naked void in_bpf_prog_type_tracepoint_3(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ call %[bpf_spin_lock]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+SEC("perf_event")
+__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_PERF_EVENT")
+__failure __msg("tracing progs cannot use bpf_spin_lock yet")
+__naked void bpf_prog_type_perf_event_3(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ call %[bpf_spin_lock]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+SEC("raw_tracepoint")
+__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT")
+__failure __msg("tracing progs cannot use bpf_spin_lock yet")
+__naked void bpf_prog_type_raw_tracepoint_3(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ call %[bpf_spin_lock]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c b/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c
new file mode 100644
index 000000000000..886498b5e6f3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c
@@ -0,0 +1,1282 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/helper_value_access.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct other_val {
+ long long foo;
+ long long bar;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct other_val);
+} map_hash_16b SEC(".maps");
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct test_val);
+} map_hash_48b SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
+SEC("tracepoint")
+__description("helper access to map: full range")
+__success
+__naked void access_to_map_full_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = %[sizeof_test_val]; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(sizeof_test_val, sizeof(struct test_val))
+ : __clobber_all);
+}
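+
+/* "Full range" here means r2 = sizeof(struct test_val) = 48 bytes, exactly
+ * the value_size of map_hash_48b, so the helper read covers the whole map
+ * value without going out of bounds.
+ */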
+
+SEC("tracepoint")
+__description("helper access to map: partial range")
+__success
+__naked void access_to_map_partial_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = 8; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+/* Call a function taking a pointer and a size which doesn't allow the size to
+ * be zero (i.e. bpf_trace_printk() declares the second argument to be
+ * ARG_CONST_SIZE, not ARG_CONST_SIZE_OR_ZERO). We attempt to pass zero for the
+ * size and expect to fail.
+ */
+SEC("tracepoint")
+__description("helper access to map: empty range")
+__failure __msg("R2 invalid zero-sized read: u64=[0,0]")
+__naked void access_to_map_empty_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = 0; \
+ call %[bpf_trace_printk]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_trace_printk),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+/* Like the test above, but this time the size register is not known to be zero;
+ * its lower-bound is zero though, which is still unacceptable.
+ */
+SEC("tracepoint")
+__description("helper access to map: possibly-empty ange")
+__failure __msg("R2 invalid zero-sized read: u64=[0,4]")
+__naked void access_to_map_possibly_empty_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ /* Read an unknown value */ \
+ r7 = *(u64*)(r0 + 0); \
+ /* Make it small and positive, to avoid other errors */ \
+ r7 &= 4; \
+ r2 = 0; \
+ r2 += r7; \
+ call %[bpf_trace_printk]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_trace_printk),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to map: out-of-bound range")
+__failure __msg("invalid access to map value, value_size=48 off=0 size=56")
+__naked void map_out_of_bound_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = %[__imm_0]; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(__imm_0, sizeof(struct test_val) + 8)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to map: negative range")
+__failure __msg("R2 min value is negative")
+__naked void access_to_map_negative_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r2 = -8; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via const imm): full range")
+__success
+__naked void via_const_imm_full_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r1 += %[test_val_foo]; \
+ r2 = %[__imm_0]; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via const imm): partial range")
+__success
+__naked void via_const_imm_partial_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r1 += %[test_val_foo]; \
+ r2 = 8; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via const imm): empty range")
+__failure __msg("R2 invalid zero-sized read")
+__naked void via_const_imm_empty_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r1 += %[test_val_foo]; \
+ r2 = 0; \
+ call %[bpf_trace_printk]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_trace_printk),
+ __imm_addr(map_hash_48b),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via const imm): out-of-bound range")
+__failure __msg("invalid access to map value, value_size=48 off=4 size=52")
+__naked void imm_out_of_bound_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r1 += %[test_val_foo]; \
+ r2 = %[__imm_0]; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 8),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via const imm): negative range (> adjustment)")
+__failure __msg("R2 min value is negative")
+__naked void const_imm_negative_range_adjustment_1(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r1 += %[test_val_foo]; \
+ r2 = -8; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via const imm): negative range (< adjustment)")
+__failure __msg("R2 min value is negative")
+__naked void const_imm_negative_range_adjustment_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r1 += %[test_val_foo]; \
+ r2 = -1; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via const reg): full range")
+__success
+__naked void via_const_reg_full_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = %[test_val_foo]; \
+ r1 += r3; \
+ r2 = %[__imm_0]; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via const reg): partial range")
+__success
+__naked void via_const_reg_partial_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = %[test_val_foo]; \
+ r1 += r3; \
+ r2 = 8; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via const reg): empty range")
+__failure __msg("R2 invalid zero-sized read")
+__naked void via_const_reg_empty_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = 0; \
+ r1 += r3; \
+ r2 = 0; \
+ call %[bpf_trace_printk]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_trace_printk),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via const reg): out-of-bound range")
+__failure __msg("invalid access to map value, value_size=48 off=4 size=52")
+__naked void reg_out_of_bound_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = %[test_val_foo]; \
+ r1 += r3; \
+ r2 = %[__imm_0]; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 8),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via const reg): negative range (> adjustment)")
+__failure __msg("R2 min value is negative")
+__naked void const_reg_negative_range_adjustment_1(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = %[test_val_foo]; \
+ r1 += r3; \
+ r2 = -8; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via const reg): negative range (< adjustment)")
+__failure __msg("R2 min value is negative")
+__naked void const_reg_negative_range_adjustment_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = %[test_val_foo]; \
+ r1 += r3; \
+ r2 = -1; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via variable): full range")
+__success
+__naked void map_via_variable_full_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ if r3 > %[test_val_foo] goto l0_%=; \
+ r1 += r3; \
+ r2 = %[__imm_0]; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via variable): partial range")
+__success
+__naked void map_via_variable_partial_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ if r3 > %[test_val_foo] goto l0_%=; \
+ r1 += r3; \
+ r2 = 8; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via variable): empty range")
+__failure __msg("R2 invalid zero-sized read")
+__naked void map_via_variable_empty_range(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ if r3 > %[test_val_foo] goto l0_%=; \
+ r1 += r3; \
+ r2 = 0; \
+ call %[bpf_trace_printk]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_trace_printk),
+ __imm_addr(map_hash_48b),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via variable): no max check")
+__failure __msg("R1 unbounded memory access")
+__naked void via_variable_no_max_check_1(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ r1 += r3; \
+ r2 = 1; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to adjusted map (via variable): wrong max check")
+__failure __msg("invalid access to map value, value_size=48 off=4 size=45")
+__naked void via_variable_wrong_max_check_1(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ if r3 > %[test_val_foo] goto l0_%=; \
+ r1 += r3; \
+ r2 = %[__imm_0]; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_probe_read_kernel),
+ __imm_addr(map_hash_48b),
+ __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 1),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to map: bounds check using <, good access")
+__success
+__naked void bounds_check_using_good_access_1(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ if r3 < 32 goto l1_%=; \
+ r0 = 0; \
+l0_%=: exit; \
+l1_%=: r1 += r3; \
+ r0 = 0; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to map: bounds check using <, bad access")
+__failure __msg("R1 unbounded memory access")
+__naked void bounds_check_using_bad_access_1(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ if r3 < 32 goto l1_%=; \
+ r1 += r3; \
+l0_%=: r0 = 0; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to map: bounds check using <=, good access")
+__success
+__naked void bounds_check_using_good_access_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ if r3 <= 32 goto l1_%=; \
+ r0 = 0; \
+l0_%=: exit; \
+l1_%=: r1 += r3; \
+ r0 = 0; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to map: bounds check using <=, bad access")
+__failure __msg("R1 unbounded memory access")
+__naked void bounds_check_using_bad_access_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ if r3 <= 32 goto l1_%=; \
+ r1 += r3; \
+l0_%=: r0 = 0; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to map: bounds check using s<, good access")
+__success
+__naked void check_using_s_good_access_1(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ if r3 s< 32 goto l1_%=; \
+l2_%=: r0 = 0; \
+l0_%=: exit; \
+l1_%=: if r3 s< 0 goto l2_%=; \
+ r1 += r3; \
+ r0 = 0; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to map: bounds check using s<, good access 2")
+__success
+__naked void using_s_good_access_2_1(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ if r3 s< 32 goto l1_%=; \
+l2_%=: r0 = 0; \
+l0_%=: exit; \
+l1_%=: if r3 s< -3 goto l2_%=; \
+ r1 += r3; \
+ r0 = 0; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to map: bounds check using s<, bad access")
+__failure __msg("R1 min value is negative")
+__naked void check_using_s_bad_access_1(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = *(u64*)(r0 + 0); \
+ if r3 s< 32 goto l1_%=; \
+l2_%=: r0 = 0; \
+l0_%=: exit; \
+l1_%=: if r3 s< -3 goto l2_%=; \
+ r1 += r3; \
+ r0 = 0; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to map: bounds check using s<=, good access")
+__success
+__naked void check_using_s_good_access_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ if r3 s<= 32 goto l1_%=; \
+l2_%=: r0 = 0; \
+l0_%=: exit; \
+l1_%=: if r3 s<= 0 goto l2_%=; \
+ r1 += r3; \
+ r0 = 0; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to map: bounds check using s<=, good access 2")
+__success
+__naked void using_s_good_access_2_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ if r3 s<= 32 goto l1_%=; \
+l2_%=: r0 = 0; \
+l0_%=: exit; \
+l1_%=: if r3 s<= -3 goto l2_%=; \
+ r1 += r3; \
+ r0 = 0; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("helper access to map: bounds check using s<=, bad access")
+__failure __msg("R1 min value is negative")
+__naked void check_using_s_bad_access_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ r3 = *(u64*)(r0 + 0); \
+ if r3 s<= 32 goto l1_%=; \
+l2_%=: r0 = 0; \
+l0_%=: exit; \
+l1_%=: if r3 s<= -3 goto l2_%=; \
+ r1 += r3; \
+ r0 = 0; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("map lookup helper access to map")
+__success
+__naked void lookup_helper_access_to_map(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r2 = r0; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_16b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("map update helper access to map")
+__success
+__naked void update_helper_access_to_map(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r4 = 0; \
+ r3 = r0; \
+ r2 = r0; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_update_elem]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_map_update_elem),
+ __imm_addr(map_hash_16b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("map update helper access to map: wrong size")
+__failure __msg("invalid access to map value, value_size=8 off=0 size=16")
+__naked void access_to_map_wrong_size(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r4 = 0; \
+ r3 = r0; \
+ r2 = r0; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_update_elem]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_map_update_elem),
+ __imm_addr(map_hash_16b),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("map helper access to adjusted map (via const imm)")
+__success
+__naked void adjusted_map_via_const_imm(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r2 = r0; \
+ r2 += %[other_val_bar]; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_16b),
+ __imm_const(other_val_bar, offsetof(struct other_val, bar))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("map helper access to adjusted map (via const imm): out-of-bound 1")
+__failure __msg("invalid access to map value, value_size=16 off=12 size=8")
+__naked void imm_out_of_bound_1(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r2 = r0; \
+ r2 += %[__imm_0]; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_16b),
+ __imm_const(__imm_0, sizeof(struct other_val) - 4)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("map helper access to adjusted map (via const imm): out-of-bound 2")
+__failure __msg("invalid access to map value, value_size=16 off=-4 size=8")
+__naked void imm_out_of_bound_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r2 = r0; \
+ r2 += -4; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_16b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("map helper access to adjusted map (via const reg)")
+__success
+__naked void adjusted_map_via_const_reg(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r2 = r0; \
+ r3 = %[other_val_bar]; \
+ r2 += r3; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_16b),
+ __imm_const(other_val_bar, offsetof(struct other_val, bar))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("map helper access to adjusted map (via const reg): out-of-bound 1")
+__failure __msg("invalid access to map value, value_size=16 off=12 size=8")
+__naked void reg_out_of_bound_1(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r2 = r0; \
+ r3 = %[__imm_0]; \
+ r2 += r3; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_16b),
+ __imm_const(__imm_0, sizeof(struct other_val) - 4)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("map helper access to adjusted map (via const reg): out-of-bound 2")
+__failure __msg("invalid access to map value, value_size=16 off=-4 size=8")
+__naked void reg_out_of_bound_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r2 = r0; \
+ r3 = -4; \
+ r2 += r3; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_16b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("map helper access to adjusted map (via variable)")
+__success
+__naked void to_adjusted_map_via_variable(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r2 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ if r3 > %[other_val_bar] goto l0_%=; \
+ r2 += r3; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_16b),
+ __imm_const(other_val_bar, offsetof(struct other_val, bar))
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("map helper access to adjusted map (via variable): no max check")
+__failure
+__msg("R2 unbounded memory access, make sure to bounds check any such access")
+__naked void via_variable_no_max_check_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r2 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ r2 += r3; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_16b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("map helper access to adjusted map (via variable): wrong max check")
+__failure __msg("invalid access to map value, value_size=16 off=9 size=8")
+__naked void via_variable_wrong_max_check_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r2 = r0; \
+ r3 = *(u32*)(r0 + 0); \
+ if r3 > %[__imm_0] goto l0_%=; \
+ r2 += r3; \
+ r1 = %[map_hash_16b] ll; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_16b),
+ __imm_const(__imm_0, offsetof(struct other_val, bar) + 1)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
new file mode 100644
index 000000000000..59e34d558654
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/int_ptr.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("arg pointer to long uninitialized")
+__success
+__naked void arg_ptr_to_long_uninitialized(void)
+{
+ asm volatile (" \
+ /* bpf_strtoul arg1 (buf) */ \
+ r7 = r10; \
+ r7 += -8; \
+ r0 = 0x00303036; \
+ *(u64*)(r7 + 0) = r0; \
+ r1 = r7; \
+ /* bpf_strtoul arg2 (buf_len) */ \
+ r2 = 4; \
+ /* bpf_strtoul arg3 (flags) */ \
+ r3 = 0; \
+ /* bpf_strtoul arg4 (res) */ \
+ r7 += -8; \
+ r4 = r7; \
+ /* bpf_strtoul() */ \
+ call %[bpf_strtoul]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_strtoul)
+ : __clobber_all);
+}
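+
+/* Rough C equivalent of the calls exercised in this file (illustrative
+ * sketch; the asm form is used so the exact stack layout seen by the
+ * verifier can be controlled):
+ *
+ *	__u64 buf = 0x00303036;
+ *	unsigned long res;
+ *
+ *	bpf_strtoul((const char *)&buf, 4, 0, &res);
+ *
+ * 0x00303036 is the bytes '6', '0', '0', '\0' in little-endian order, i.e.
+ * the string "600". res is an output argument, so the uninitialized result
+ * slot in the test above is accepted.
+ */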
+
+SEC("socket")
+__description("arg pointer to long half-uninitialized")
+__success
+__retval(0)
+__naked void ptr_to_long_half_uninitialized(void)
+{
+ asm volatile (" \
+ /* bpf_strtoul arg1 (buf) */ \
+ r7 = r10; \
+ r7 += -8; \
+ r0 = 0x00303036; \
+ *(u64*)(r7 + 0) = r0; \
+ r1 = r7; \
+ /* bpf_strtoul arg2 (buf_len) */ \
+ r2 = 4; \
+ /* bpf_strtoul arg3 (flags) */ \
+ r3 = 0; \
+ /* bpf_strtoul arg4 (res) */ \
+ r7 += -8; \
+ *(u32*)(r7 + 0) = r0; \
+ r4 = r7; \
+ /* bpf_strtoul() */ \
+ call %[bpf_strtoul]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_strtoul)
+ : __clobber_all);
+}
+
+SEC("cgroup/sysctl")
+__description("arg pointer to long misaligned")
+__failure __msg("misaligned stack access off 0+-20+0 size 8")
+__naked void arg_ptr_to_long_misaligned(void)
+{
+ asm volatile (" \
+ /* bpf_strtoul arg1 (buf) */ \
+ r7 = r10; \
+ r7 += -8; \
+ r0 = 0x00303036; \
+ *(u64*)(r7 + 0) = r0; \
+ r1 = r7; \
+ /* bpf_strtoul arg2 (buf_len) */ \
+ r2 = 4; \
+ /* bpf_strtoul arg3 (flags) */ \
+ r3 = 0; \
+ /* bpf_strtoul arg4 (res) */ \
+ r7 += -12; \
+ r0 = 0; \
+ *(u32*)(r7 + 0) = r0; \
+ *(u64*)(r7 + 4) = r0; \
+ r4 = r7; \
+ /* bpf_strtoul() */ \
+ call %[bpf_strtoul]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_strtoul)
+ : __clobber_all);
+}
+
+SEC("cgroup/sysctl")
+__description("arg pointer to long size < sizeof(long)")
+__failure __msg("invalid write to stack R4 off=-4 size=8")
+__naked void to_long_size_sizeof_long(void)
+{
+ asm volatile (" \
+ /* bpf_strtoul arg1 (buf) */ \
+ r7 = r10; \
+ r7 += -16; \
+ r0 = 0x00303036; \
+ *(u64*)(r7 + 0) = r0; \
+ r1 = r7; \
+ /* bpf_strtoul arg2 (buf_len) */ \
+ r2 = 4; \
+ /* bpf_strtoul arg3 (flags) */ \
+ r3 = 0; \
+ /* bpf_strtoul arg4 (res) */ \
+ r7 += 12; \
+ *(u32*)(r7 + 0) = r0; \
+ r4 = r7; \
+ /* bpf_strtoul() */ \
+ call %[bpf_strtoul]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_strtoul)
+ : __clobber_all);
+}
+
+SEC("cgroup/sysctl")
+__description("arg pointer to long initialized")
+__success
+__naked void arg_ptr_to_long_initialized(void)
+{
+ asm volatile (" \
+ /* bpf_strtoul arg1 (buf) */ \
+ r7 = r10; \
+ r7 += -8; \
+ r0 = 0x00303036; \
+ *(u64*)(r7 + 0) = r0; \
+ r1 = r7; \
+ /* bpf_strtoul arg2 (buf_len) */ \
+ r2 = 4; \
+ /* bpf_strtoul arg3 (flags) */ \
+ r3 = 0; \
+ /* bpf_strtoul arg4 (res) */ \
+ r7 += -8; \
+ *(u64*)(r7 + 0) = r0; \
+ r4 = r7; \
+ /* bpf_strtoul() */ \
+ call %[bpf_strtoul]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_strtoul)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
new file mode 100644
index 000000000000..75dd922e4e9f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
@@ -0,0 +1,786 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 8);
+ __type(key, __u32);
+ __type(value, __u64);
+} map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
+ __uint(max_entries, 8);
+} ringbuf SEC(".maps");
+
+struct vm_area_struct;
+struct bpf_map;
+
+struct buf_context {
+ char *buf;
+};
+
+struct num_context {
+ __u64 i;
+ __u64 j;
+};
+
+__u8 choice_arr[2] = { 0, 1 };
+
+static int unsafe_on_2nd_iter_cb(__u32 idx, struct buf_context *ctx)
+{
+ if (idx == 0) {
+ ctx->buf = (char *)(0xDEAD);
+ return 0;
+ }
+
+ if (bpf_probe_read_user(ctx->buf, 8, (void *)(0xBADC0FFEE)))
+ return 1;
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("R1 type=scalar expected=fp")
+int unsafe_on_2nd_iter(void *unused)
+{
+ char buf[4];
+ struct buf_context loop_ctx = { .buf = buf };
+
+ bpf_loop(100, unsafe_on_2nd_iter_cb, &loop_ctx, 0);
+ return 0;
+}
+
+static int unsafe_on_zero_iter_cb(__u32 idx, struct num_context *ctx)
+{
+ ctx->i = 0;
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
+int unsafe_on_zero_iter(void *unused)
+{
+ struct num_context loop_ctx = { .i = 32 };
+
+ bpf_loop(100, unsafe_on_zero_iter_cb, &loop_ctx, 0);
+ return choice_arr[loop_ctx.i];
+}
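+
+/* choice_arr[] is global data, i.e. a map value of size 2, and the verifier
+ * must assume the bpf_loop() callback may run zero times, so loop_ctx.i can
+ * still be 32 at the array access above; that is the off=32 access the
+ * expected message refers to.
+ */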
+
+static int widening_cb(__u32 idx, struct num_context *ctx)
+{
+ ++ctx->i;
+ return 0;
+}
+
+SEC("?raw_tp")
+__success
+int widening(void *unused)
+{
+ struct num_context loop_ctx = { .i = 0, .j = 1 };
+
+ bpf_loop(100, widening_cb, &loop_ctx, 0);
+ /* loop_ctx.j is not changed during callback iteration,
+ * so the verifier should not apply widening to it.
+ */
+ return choice_arr[loop_ctx.j];
+}
+
+static int loop_detection_cb(__u32 idx, struct num_context *ctx)
+{
+ for (;;) {}
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("infinite loop detected")
+int loop_detection(void *unused)
+{
+ struct num_context loop_ctx = { .i = 0 };
+
+ bpf_loop(100, loop_detection_cb, &loop_ctx, 0);
+ return 0;
+}
+
+static __always_inline __u64 oob_state_machine(struct num_context *ctx)
+{
+ switch (ctx->i) {
+ case 0:
+ ctx->i = 1;
+ break;
+ case 1:
+ ctx->i = 32;
+ break;
+ }
+ return 0;
+}
+
+static __u64 for_each_map_elem_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
+{
+ return oob_state_machine(data);
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
+int unsafe_for_each_map_elem(void *unused)
+{
+ struct num_context loop_ctx = { .i = 0 };
+
+ bpf_for_each_map_elem(&map, for_each_map_elem_cb, &loop_ctx, 0);
+ return choice_arr[loop_ctx.i];
+}
+
+static __u64 ringbuf_drain_cb(struct bpf_dynptr *dynptr, void *data)
+{
+ return oob_state_machine(data);
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
+int unsafe_ringbuf_drain(void *unused)
+{
+ struct num_context loop_ctx = { .i = 0 };
+
+ bpf_user_ringbuf_drain(&ringbuf, ringbuf_drain_cb, &loop_ctx, 0);
+ return choice_arr[loop_ctx.i];
+}
+
+static __u64 find_vma_cb(struct task_struct *task, struct vm_area_struct *vma, void *data)
+{
+ return oob_state_machine(data);
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
+int unsafe_find_vma(void *unused)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct num_context loop_ctx = { .i = 0 };
+
+ bpf_find_vma(task, 0, find_vma_cb, &loop_ctx, 0);
+ return choice_arr[loop_ctx.i];
+}
+
+static int iter_limit_cb(__u32 idx, struct num_context *ctx)
+{
+ ctx->i++;
+ return 0;
+}
+
+SEC("?raw_tp")
+__success
+int bpf_loop_iter_limit_ok(void *unused)
+{
+ struct num_context ctx = { .i = 0 };
+
+ bpf_loop(1, iter_limit_cb, &ctx, 0);
+ return choice_arr[ctx.i];
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid access to map value, value_size=2 off=2 size=1")
+int bpf_loop_iter_limit_overflow(void *unused)
+{
+ struct num_context ctx = { .i = 0 };
+
+ bpf_loop(2, iter_limit_cb, &ctx, 0);
+ return choice_arr[ctx.i];
+}
+
+static int iter_limit_level2a_cb(__u32 idx, struct num_context *ctx)
+{
+ ctx->i += 100;
+ return 0;
+}
+
+static int iter_limit_level2b_cb(__u32 idx, struct num_context *ctx)
+{
+ ctx->i += 10;
+ return 0;
+}
+
+static int iter_limit_level1_cb(__u32 idx, struct num_context *ctx)
+{
+ ctx->i += 1;
+ bpf_loop(1, iter_limit_level2a_cb, ctx, 0);
+ bpf_loop(1, iter_limit_level2b_cb, ctx, 0);
+ return 0;
+}
+
+/* Check that the path visiting every callback function once has been
+ * reached by the verifier. Variables 'ctx{1,2}.i' below serve as flags,
+ * with each decimal digit corresponding to a callback visit marker.
+ */
+SEC("socket")
+__success __retval(111111)
+int bpf_loop_iter_limit_nested(void *unused)
+{
+ struct num_context ctx1 = { .i = 0 };
+ struct num_context ctx2 = { .i = 0 };
+ __u64 a, b, c;
+
+ bpf_loop(1, iter_limit_level1_cb, &ctx1, 0);
+ bpf_loop(1, iter_limit_level1_cb, &ctx2, 0);
+ a = ctx1.i;
+ b = ctx2.i;
+ /* Force 'ctx1.i' and 'ctx2.i' precise. */
+ c = choice_arr[(a + b) % 2];
+ /* This makes 'c' zero, but neither clang nor verifier know it. */
+ c /= 10;
+ /* Make sure that verifier does not visit 'impossible' states:
+ * enumerate all possible callback visit masks.
+ */
+ if (a != 0 && a != 1 && a != 11 && a != 101 && a != 111 &&
+ b != 0 && b != 1 && b != 11 && b != 101 && b != 111)
+ asm volatile ("r0 /= 0;" ::: "r0");
+ return 1000 * a + b + c;
+}
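+
+/* Each iter_limit_level1_cb() invocation adds 1 + 100 + 10 = 111 to ctx->i,
+ * so a == b == 111 after the two bpf_loop(1, ...) calls, c ends up 0, and
+ * the return value is 1000 * 111 + 111 + 0 == 111111, matching the
+ * __retval() annotation above.
+ */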
+
+struct iter_limit_bug_ctx {
+ __u64 a;
+ __u64 b;
+ __u64 c;
+};
+
+static __naked void iter_limit_bug_cb(void)
+{
+ /* This is the same as C code below, but written
+ * in assembly to control which branches are fall-through.
+ *
+ * switch (bpf_get_prandom_u32()) {
+ * case 1: ctx->a = 42; break;
+ * case 2: ctx->b = 42; break;
+ * default: ctx->c = 42; break;
+ * }
+ */
+ asm volatile (
+ "r9 = r2;"
+ "call %[bpf_get_prandom_u32];"
+ "r1 = r0;"
+ "r2 = 42;"
+ "r0 = 0;"
+ "if r1 == 0x1 goto 1f;"
+ "if r1 == 0x2 goto 2f;"
+ "*(u64 *)(r9 + 16) = r2;"
+ "exit;"
+ "1: *(u64 *)(r9 + 0) = r2;"
+ "exit;"
+ "2: *(u64 *)(r9 + 8) = r2;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+int tmp_var;
+SEC("socket")
+__failure __msg("infinite loop detected at insn 2")
+__naked void jgt_imm64_and_may_goto(void)
+{
+ asm volatile (" \
+ r0 = %[tmp_var] ll; \
+l0_%=: .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short -3; /* off -3 */ \
+ .long 0; /* imm */ \
+ if r0 > 10 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" :: __imm_addr(tmp_var)
+ : __clobber_all);
+}
+
+SEC("socket")
+__failure __msg("infinite loop detected at insn 1")
+__naked void may_goto_self(void)
+{
+ asm volatile (" \
+ r0 = *(u32 *)(r10 - 4); \
+l0_%=: .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short -1; /* off -1 */ \
+ .long 0; /* imm */ \
+ if r0 > 10 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__success __retval(0)
+__naked void may_goto_neg_off(void)
+{
+ asm volatile (" \
+ r0 = *(u32 *)(r10 - 4); \
+ goto l0_%=; \
+ goto l1_%=; \
+l0_%=: .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short -2; /* off -2 */ \
+ .long 0; /* imm */ \
+ if r0 > 10 goto l0_%=; \
+l1_%=: r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__failure
+__flag(BPF_F_TEST_STATE_FREQ)
+int iter_limit_bug(struct __sk_buff *skb)
+{
+ struct iter_limit_bug_ctx ctx = { 7, 7, 7 };
+
+ bpf_loop(2, iter_limit_bug_cb, &ctx, 0);
+
+ /* This is the same as C code below,
+ * written in assembly to guarantee checks order.
+ *
+ * if (ctx.a == 42 && ctx.b == 42 && ctx.c == 7)
+ * asm volatile("r1 /= 0;":::"r1");
+ */
+ asm volatile (
+ "r1 = *(u64 *)%[ctx_a];"
+ "if r1 != 42 goto 1f;"
+ "r1 = *(u64 *)%[ctx_b];"
+ "if r1 != 42 goto 1f;"
+ "r1 = *(u64 *)%[ctx_c];"
+ "if r1 != 7 goto 1f;"
+ "r1 /= 0;"
+ "1:"
+ :
+ : [ctx_a]"m"(ctx.a),
+ [ctx_b]"m"(ctx.b),
+ [ctx_c]"m"(ctx.c)
+ : "r1"
+ );
+ return 0;
+}
+
+SEC("socket")
+__success __retval(0)
+__naked void ja_and_may_goto(void)
+{
+ asm volatile (" \
+l0_%=: .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 1; /* off 1 */ \
+ .long 0; /* imm */ \
+ goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_common);
+}
+
+SEC("socket")
+__success __retval(0)
+__naked void ja_and_may_goto2(void)
+{
+ asm volatile (" \
+l0_%=: r0 = 0; \
+ .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 1; /* off 1 */ \
+ .long 0; /* imm */ \
+ goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_common);
+}
+
+SEC("socket")
+__success __retval(0)
+__naked void jlt_and_may_goto(void)
+{
+ asm volatile (" \
+l0_%=: call %[bpf_jiffies64]; \
+ .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 1; /* off 1 */ \
+ .long 0; /* imm */ \
+ if r0 < 10 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" :: __imm(bpf_jiffies64)
+ : __clobber_all);
+}
+
+#ifdef CAN_USE_GOTOL
+SEC("socket")
+__success __retval(0)
+__naked void gotol_and_may_goto(void)
+{
+ asm volatile (" \
+l0_%=: r0 = 0; \
+ .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 1; /* off 1 */ \
+ .long 0; /* imm */ \
+ gotol l0_%=; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_common);
+}
+#endif
+
+SEC("socket")
+__success __retval(0)
+__naked void ja_and_may_goto_subprog(void)
+{
+ asm volatile (" \
+ call subprog_with_may_goto; \
+ exit; \
+" ::: __clobber_all);
+}
+
+static __naked __noinline __used
+void subprog_with_may_goto(void)
+{
+ asm volatile (" \
+l0_%=: .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 1; /* off 1 */ \
+ .long 0; /* imm */ \
+ goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+#define ARR_SZ 1000000
+int zero;
+char arr[ARR_SZ];
+
+SEC("socket")
+__success __retval(0xd495cdc0)
+int cond_break1(const void *ctx)
+{
+ unsigned long i;
+ unsigned int sum = 0;
+
+ for (i = zero; i < ARR_SZ && can_loop; i++)
+ sum += i;
+ for (i = zero; i < ARR_SZ; i++) {
+ barrier_var(i);
+ sum += i + arr[i];
+ cond_break;
+ }
+
+ return sum;
+}
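+
+/* can_loop and cond_break (from bpf_experimental.h, included above) are built
+ * on the may_goto instruction and let the verifier bound otherwise unbounded
+ * loops. At runtime both loops above run to completion, so the expected value
+ * is 2 * (999999 * 1000000 / 2) truncated to 32 bits, i.e. 0xd495cdc0 (arr[]
+ * is zero-initialized and contributes nothing).
+ */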
+
+SEC("socket")
+__success __retval(999000000)
+int cond_break2(const void *ctx)
+{
+ int i, j;
+ int sum = 0;
+
+ for (i = zero; i < 1000 && can_loop; i++)
+ for (j = zero; j < 1000; j++) {
+ sum += i + j;
+ cond_break;
+ }
+ return sum;
+}
+
+static __noinline int loop(void)
+{
+ int i, sum = 0;
+
+ for (i = zero; i <= 1000000 && can_loop; i++)
+ sum += i;
+
+ return sum;
+}
+
+SEC("socket")
+__success __retval(0x6a5a2920)
+int cond_break3(const void *ctx)
+{
+ return loop();
+}
+
+SEC("socket")
+__success __retval(1)
+int cond_break4(const void *ctx)
+{
+ int cnt = zero;
+
+ for (;;) {
+ /* should eventually break out of the loop */
+ cond_break;
+ cnt++;
+ }
+ /* if we looped a bit, it's a success */
+ return cnt > 1 ? 1 : 0;
+}
+
+static __noinline int static_subprog(void)
+{
+ int cnt = zero;
+
+ for (;;) {
+ cond_break;
+ cnt++;
+ }
+
+ return cnt;
+}
+
+SEC("socket")
+__success __retval(1)
+int cond_break5(const void *ctx)
+{
+ int cnt1 = zero, cnt2;
+
+ for (;;) {
+ cond_break;
+ cnt1++;
+ }
+
+ cnt2 = static_subprog();
+
+ /* main and subprog have to loop a bit */
+ return cnt1 > 1 && cnt2 > 1 ? 1 : 0;
+}
+
+#define ARR2_SZ 1000
+SEC(".data.arr2")
+char arr2[ARR2_SZ];
+
+SEC("socket")
+__success __flag(BPF_F_TEST_STATE_FREQ)
+int loop_inside_iter(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, sum = 0;
+ __u64 i = 0;
+
+ bpf_iter_num_new(&it, 0, ARR2_SZ);
+ while ((v = bpf_iter_num_next(&it))) {
+ if (i < ARR2_SZ)
+ sum += arr2[i++];
+ }
+ bpf_iter_num_destroy(&it);
+ return sum;
+}
+
+SEC("socket")
+__success __flag(BPF_F_TEST_STATE_FREQ)
+int loop_inside_iter_signed(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, sum = 0;
+ long i = 0;
+
+ bpf_iter_num_new(&it, 0, ARR2_SZ);
+ while ((v = bpf_iter_num_next(&it))) {
+ if (i < ARR2_SZ && i >= 0)
+ sum += arr2[i++];
+ }
+ bpf_iter_num_destroy(&it);
+ return sum;
+}
+
+volatile const int limit = ARR2_SZ;
+
+SEC("socket")
+__success __flag(BPF_F_TEST_STATE_FREQ)
+int loop_inside_iter_volatile_limit(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, sum = 0;
+ __u64 i = 0;
+
+ bpf_iter_num_new(&it, 0, ARR2_SZ);
+ while ((v = bpf_iter_num_next(&it))) {
+ if (i < limit)
+ sum += arr2[i++];
+ }
+ bpf_iter_num_destroy(&it);
+ return sum;
+}
+
+#define ARR_LONG_SZ 1000
+
+SEC(".data.arr_long")
+long arr_long[ARR_LONG_SZ];
+
+SEC("socket")
+__success
+int test1(const void *ctx)
+{
+ long i;
+
+ for (i = 0; i < ARR_LONG_SZ && can_loop; i++)
+ arr_long[i] = i;
+ return 0;
+}
+
+SEC("socket")
+__success
+int test2(const void *ctx)
+{
+ __u64 i;
+
+ for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
+ barrier_var(i);
+ arr_long[i] = i;
+ }
+ return 0;
+}
+
+SEC(".data.arr_foo")
+struct {
+ int a;
+ int b;
+} arr_foo[ARR_LONG_SZ];
+
+SEC("socket")
+__success
+int test3(const void *ctx)
+{
+ __u64 i;
+
+ for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
+ barrier_var(i);
+ arr_foo[i].a = i;
+ arr_foo[i].b = i;
+ }
+ return 0;
+}
+
+SEC("socket")
+__success
+int test4(const void *ctx)
+{
+ long i;
+
+ for (i = zero + ARR_LONG_SZ - 1; i < ARR_LONG_SZ && i >= 0 && can_loop; i--) {
+ barrier_var(i);
+ arr_foo[i].a = i;
+ arr_foo[i].b = i;
+ }
+ return 0;
+}
+
+char buf[10] SEC(".data.buf");
+
+SEC("socket")
+__description("check add const")
+__success
+__naked void check_add_const(void)
+{
+ /* typical LLVM generated loop with may_goto */
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 > 9 goto l1_%=; \
+l0_%=: r1 = %[buf]; \
+ r2 = r0; \
+ r1 += r2; \
+ r3 = *(u8 *)(r1 +0); \
+ .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 4; /* off of l1_%=: */ \
+ .long 0; /* imm */ \
+ r0 = r2; \
+ r0 += 1; \
+ if r2 < 9 goto l0_%=; \
+ exit; \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm_ptr(buf)
+ : __clobber_common);
+}
+
+SEC("socket")
+__failure
+__msg("*(u8 *)(r7 +0) = r0")
+__msg("invalid access to map value, value_size=10 off=10 size=1")
+__naked void check_add_const_3regs(void)
+{
+ asm volatile (
+ "r6 = %[buf];"
+ "r7 = %[buf];"
+ "call %[bpf_ktime_get_ns];"
+ "r1 = r0;" /* link r0.id == r1.id == r2.id */
+ "r2 = r0;"
+ "r1 += 1;" /* r1 == r0+1 */
+ "r2 += 2;" /* r2 == r0+2 */
+ "if r0 > 8 goto 1f;" /* r0 range [0, 8] */
+ "r6 += r1;" /* r1 range [1, 9] */
+ "r7 += r2;" /* r2 range [2, 10] */
+ "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */
+ "*(u8 *)(r7 +0) = r0;" /* unsafe, out of bounds */
+ "1: exit;"
+ :
+ : __imm(bpf_ktime_get_ns),
+ __imm_ptr(buf)
+ : __clobber_common);
+}
+
+SEC("socket")
+__failure
+__msg("*(u8 *)(r8 -1) = r0")
+__msg("invalid access to map value, value_size=10 off=10 size=1")
+__naked void check_add_const_3regs_2if(void)
+{
+ asm volatile (
+ "r6 = %[buf];"
+ "r7 = %[buf];"
+ "r8 = %[buf];"
+ "call %[bpf_ktime_get_ns];"
+ "if r0 < 2 goto 1f;"
+ "r1 = r0;" /* link r0.id == r1.id == r2.id */
+ "r2 = r0;"
+ "r1 += 1;" /* r1 == r0+1 */
+ "r2 += 2;" /* r2 == r0+2 */
+ "if r2 > 11 goto 1f;" /* r2 range [0, 11] -> r0 range [-2, 9]; r1 range [-1, 10] */
+ "if r0 s< 0 goto 1f;" /* r0 range [0, 9] -> r1 range [1, 10]; r2 range [2, 11]; */
+ "r6 += r0;" /* r0 range [0, 9] */
+ "r7 += r1;" /* r1 range [1, 10] */
+ "r8 += r2;" /* r2 range [2, 11] */
+ "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */
+ "*(u8 *)(r7 -1) = r0;" /* safe */
+ "*(u8 *)(r8 -1) = r0;" /* unsafe */
+ "1: exit;"
+ :
+ : __imm(bpf_ktime_get_ns),
+ __imm_ptr(buf)
+ : __clobber_common);
+}
+
+SEC("socket")
+__failure
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void check_add_const_regsafe_off(void)
+{
+ asm volatile (
+ "r8 = %[buf];"
+ "call %[bpf_ktime_get_ns];"
+ "r6 = r0;"
+ "call %[bpf_ktime_get_ns];"
+ "r7 = r0;"
+ "call %[bpf_ktime_get_ns];"
+ "r1 = r0;" /* same ids for r1 and r0 */
+ "if r6 > r7 goto 1f;" /* this jump can't be predicted */
+ "r1 += 1;" /* r1.off == +1 */
+ "goto 2f;"
+ "1: r1 += 100;" /* r1.off == +100 */
+ "goto +0;" /* verify r1.off in regsafe() after this insn */
+ "2: if r0 > 8 goto 3f;" /* r0 range [0,8], r1 range either [1,9] or [100,108]*/
+ "r8 += r1;"
+ "*(u8 *)(r8 +0) = r0;" /* potentially unsafe, buf size is 10 */
+ "3: exit;"
+ :
+ : __imm(bpf_ktime_get_ns),
+ __imm_ptr(buf)
+ : __clobber_common);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c b/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c
new file mode 100644
index 000000000000..bf16b00502f2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_XSKMAP);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} map_xskmap SEC(".maps");
+
+/* This is equivalent to the following program:
+ *
+ * r6 = skb->sk;
+ * r7 = sk_fullsock(r6);
+ * r0 = sk_fullsock(r6);
+ * if (r0 == 0) return 0; (a)
+ * if (r0 != r7) return 0; (b)
+ * *r7->type; (c)
+ * return 0;
+ *
+ * It is safe to dereference r7 at point (c), because of (a) and (b).
+ * The test verifies that relation r0 == r7 is propagated from (b) to (c).
+ */
+SEC("cgroup/skb")
+__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JNE false branch")
+__success __failure_unpriv __msg_unpriv("R7 pointer comparison")
+__retval(0)
+__naked void socket_for_jne_false_branch(void)
+{
+ asm volatile (" \
+ /* r6 = skb->sk; */ \
+ r6 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ /* if (r6 == 0) return 0; */ \
+ if r6 == 0 goto l0_%=; \
+ /* r7 = sk_fullsock(skb); */ \
+ r1 = r6; \
+ call %[bpf_sk_fullsock]; \
+ r7 = r0; \
+ /* r0 = sk_fullsock(skb); */ \
+ r1 = r6; \
+ call %[bpf_sk_fullsock]; \
+ /* if (r0 == null) return 0; */ \
+ if r0 == 0 goto l0_%=; \
+ /* if (r0 == r7) r0 = *(r7->type); */ \
+ if r0 != r7 goto l0_%=; /* Use ! JNE ! */\
+ r0 = *(u32*)(r7 + %[bpf_sock_type]); \
+l0_%=: /* return 0 */ \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
+ : __clobber_all);
+}
+
+/* Same as above, but verify that another branch of JNE still
+ * prohibits access to PTR_MAYBE_NULL.
+ */
+SEC("cgroup/skb")
+__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JNE true branch")
+__failure __msg("R7 invalid mem access 'sock_or_null'")
+__failure_unpriv __msg_unpriv("R7 pointer comparison")
+__naked void unchanged_for_jne_true_branch(void)
+{
+ asm volatile (" \
+ /* r6 = skb->sk */ \
+ r6 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ /* if (r6 == 0) return 0; */ \
+ if r6 == 0 goto l0_%=; \
+ /* r7 = sk_fullsock(skb); */ \
+ r1 = r6; \
+ call %[bpf_sk_fullsock]; \
+ r7 = r0; \
+ /* r0 = sk_fullsock(skb); */ \
+ r1 = r6; \
+ call %[bpf_sk_fullsock]; \
+ /* if (r0 == null) return 0; */ \
+ if r0 != 0 goto l0_%=; \
+ /* if (r0 == r7) return 0; */ \
+ if r0 != r7 goto l1_%=; /* Use ! JNE ! */\
+ goto l0_%=; \
+l1_%=: /* r0 = *(r7->type); */ \
+ r0 = *(u32*)(r7 + %[bpf_sock_type]); \
+l0_%=: /* return 0 */ \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
+ : __clobber_all);
+}
+
+/* Same as the first test, but not-null should be inferred for the JEQ true branch */
+SEC("cgroup/skb")
+__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JEQ true branch")
+__success __failure_unpriv __msg_unpriv("R7 pointer comparison")
+__retval(0)
+__naked void socket_for_jeq_true_branch(void)
+{
+ asm volatile (" \
+ /* r6 = skb->sk; */ \
+ r6 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ /* if (r6 == null) return 0; */ \
+ if r6 == 0 goto l0_%=; \
+ /* r7 = sk_fullsock(skb); */ \
+ r1 = r6; \
+ call %[bpf_sk_fullsock]; \
+ r7 = r0; \
+ /* r0 = sk_fullsock(skb); */ \
+ r1 = r6; \
+ call %[bpf_sk_fullsock]; \
+ /* if (r0 == null) return 0; */ \
+ if r0 == 0 goto l0_%=; \
+ /* if (r0 != r7) return 0; */ \
+ if r0 == r7 goto l1_%=; /* Use ! JEQ ! */\
+ goto l0_%=; \
+l1_%=: /* r0 = *(r7->type); */ \
+ r0 = *(u32*)(r7 + %[bpf_sock_type]); \
+l0_%=: /* return 0; */ \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
+ : __clobber_all);
+}
+
+/* Same as above, but verify that the other branch of JEQ still
+ * prohibits access to PTR_MAYBE_NULL.
+ */
+SEC("cgroup/skb")
+__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JEQ false branch")
+__failure __msg("R7 invalid mem access 'sock_or_null'")
+__failure_unpriv __msg_unpriv("R7 pointer comparison")
+__naked void unchanged_for_jeq_false_branch(void)
+{
+ asm volatile (" \
+ /* r6 = skb->sk; */ \
+ r6 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ /* if (r6 == null) return 0; */ \
+ if r6 == 0 goto l0_%=; \
+ /* r7 = sk_fullsock(skb); */ \
+ r1 = r6; \
+ call %[bpf_sk_fullsock]; \
+ r7 = r0; \
+ /* r0 = sk_fullsock(skb); */ \
+ r1 = r6; \
+ call %[bpf_sk_fullsock]; \
+ /* if (r0 == null) return 0; */ \
+ if r0 == 0 goto l0_%=; \
+ /* if (r0 != r7) r0 = *(r7->type); */ \
+ if r0 == r7 goto l0_%=; /* Use ! JEQ ! */\
+ r0 = *(u32*)(r7 + %[bpf_sock_type]); \
+l0_%=: /* return 0; */ \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
+ : __clobber_all);
+}
+
+/* Maps are treated in a different branch of `mark_ptr_not_null_reg`,
+ * so separate test for maps case.
+ */
+SEC("xdp")
+__description("jne/jeq infer not null, PTR_TO_MAP_VALUE_OR_NULL -> PTR_TO_MAP_VALUE")
+__success __retval(0)
+__naked void null_ptr_to_map_value(void)
+{
+ asm volatile (" \
+ /* r9 = &some stack to use as key */ \
+ r1 = 0; \
+ *(u32*)(r10 - 8) = r1; \
+ r9 = r10; \
+ r9 += -8; \
+ /* r8 = process local map */ \
+ r8 = %[map_xskmap] ll; \
+ /* r6 = map_lookup_elem(r8, r9); */ \
+ r1 = r8; \
+ r2 = r9; \
+ call %[bpf_map_lookup_elem]; \
+ r6 = r0; \
+ /* r7 = map_lookup_elem(r8, r9); */ \
+ r1 = r8; \
+ r2 = r9; \
+ call %[bpf_map_lookup_elem]; \
+ r7 = r0; \
+ /* if (r6 == 0) return 0; */ \
+ if r6 == 0 goto l0_%=; \
+ /* if (r6 != r7) return 0; */ \
+ if r6 != r7 goto l0_%=; \
+ /* read *r7; */ \
+ r0 = *(u32*)(r7 + %[bpf_xdp_sock_queue_id]); \
+l0_%=: /* return 0; */ \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_xskmap),
+ __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_jit_convergence.c b/tools/testing/selftests/bpf/progs/verifier_jit_convergence.c
new file mode 100644
index 000000000000..9f3f2b7db450
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_jit_convergence.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct value_t {
+ long long a[32];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct value_t);
+} map_hash SEC(".maps");
+
+SEC("socket")
+__description("bpf_jit_convergence je <-> jmp")
+__success __retval(0)
+__arch_x86_64
+__jited(" pushq %rbp")
+__naked void btf_jit_convergence_je_jmp(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 0 goto l20_%=;"
+ "if r0 == 1 goto l21_%=;"
+ "if r0 == 2 goto l22_%=;"
+ "if r0 == 3 goto l23_%=;"
+ "if r0 == 4 goto l24_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "call %[bpf_get_prandom_u32];"
+"l20_%=:"
+"l21_%=:"
+"l22_%=:"
+"l23_%=:"
+"l24_%=:"
+ "r1 = 0;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r2 = r10;"
+ "r2 += -8;"
+ "r1 = %[map_hash] ll;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 == 0 goto l1_%=;"
+ "r6 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r5 = r6;"
+ "if r0 != 0x0 goto l12_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "r1 = r0;"
+ "r2 = r6;"
+ "if r1 == 0x0 goto l0_%=;"
+"l9_%=:"
+ "r2 = *(u64 *)(r6 + 0x0);"
+ "r2 += 0x1;"
+ "*(u64 *)(r6 + 0x0) = r2;"
+ "goto l1_%=;"
+"l12_%=:"
+ "r1 = r7;"
+ "r1 += 0x98;"
+ "r2 = r5;"
+ "r2 += 0x90;"
+ "r2 = *(u32 *)(r2 + 0x0);"
+ "r3 = r7;"
+ "r3 &= 0x1;"
+ "r2 *= 0xa8;"
+ "if r3 == 0x0 goto l2_%=;"
+ "r1 += r2;"
+ "r1 -= r7;"
+ "r1 += 0x8;"
+ "if r1 <= 0xb20 goto l3_%=;"
+ "r1 = 0x0;"
+ "goto l4_%=;"
+"l3_%=:"
+ "r1 += r7;"
+"l4_%=:"
+ "if r1 == 0x0 goto l8_%=;"
+ "goto l9_%=;"
+"l2_%=:"
+ "r1 += r2;"
+ "r1 -= r7;"
+ "r1 += 0x10;"
+ "if r1 <= 0xb20 goto l6_%=;"
+ "r1 = 0x0;"
+ "goto l7_%=;"
+"l6_%=:"
+ "r1 += r7;"
+"l7_%=:"
+ "if r1 == 0x0 goto l8_%=;"
+ "goto l9_%=;"
+"l0_%=:"
+ "r1 = 0x3;"
+ "*(u64 *)(r10 - 0x10) = r1;"
+ "r2 = r1;"
+ "goto l1_%=;"
+"l8_%=:"
+ "r1 = r5;"
+ "r1 += 0x4;"
+ "r1 = *(u32 *)(r1 + 0x0);"
+ "*(u64 *)(r10 - 0x8) = r1;"
+"l1_%=:"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c b/tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c
new file mode 100644
index 000000000000..a509cad97e69
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+#include "cgrp_kfunc_common.h"
+#include "cpumask_common.h"
+#include "task_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+/***************
+ * Task kfuncs *
+ ***************/
+
+static void task_kfunc_load_test(void)
+{
+ struct task_struct *current, *ref_1, *ref_2;
+
+ current = bpf_get_current_task_btf();
+ ref_1 = bpf_task_from_pid(current->pid);
+ if (!ref_1)
+ return;
+
+ ref_2 = bpf_task_acquire(ref_1);
+ if (ref_2)
+ bpf_task_release(ref_2);
+ bpf_task_release(ref_1);
+}
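+
+/* The same load test is attached below under several program types; which of
+ * them may call these task kfuncs depends on the attach type, which is what
+ * the mixed __failure/__success annotations check.
+ */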
+
+SEC("raw_tp")
+__failure __msg("calling kernel function")
+int BPF_PROG(task_kfunc_raw_tp)
+{
+ task_kfunc_load_test();
+ return 0;
+}
+
+SEC("syscall")
+__success
+int BPF_PROG(task_kfunc_syscall)
+{
+ task_kfunc_load_test();
+ return 0;
+}
+
+SEC("tracepoint")
+__success
+int BPF_PROG(task_kfunc_tracepoint)
+{
+ task_kfunc_load_test();
+ return 0;
+}
+
+SEC("perf_event")
+__success
+int BPF_PROG(task_kfunc_perf_event)
+{
+ task_kfunc_load_test();
+ return 0;
+}
+
+/*****************
+ * cgroup kfuncs *
+ *****************/
+
+static void cgrp_kfunc_load_test(void)
+{
+ struct cgroup *cgrp, *ref;
+
+ cgrp = bpf_cgroup_from_id(0);
+ if (!cgrp)
+ return;
+
+ ref = bpf_cgroup_acquire(cgrp);
+ if (!ref) {
+ bpf_cgroup_release(cgrp);
+ return;
+ }
+
+ bpf_cgroup_release(ref);
+ bpf_cgroup_release(cgrp);
+}
+
+SEC("raw_tp")
+__failure __msg("calling kernel function")
+int BPF_PROG(cgrp_kfunc_raw_tp)
+{
+ cgrp_kfunc_load_test();
+ return 0;
+}
+
+SEC("syscall")
+__success
+int BPF_PROG(cgrp_kfunc_syscall)
+{
+ cgrp_kfunc_load_test();
+ return 0;
+}
+
+SEC("tracepoint")
+__success
+int BPF_PROG(cgrp_kfunc_tracepoint)
+{
+ cgrp_kfunc_load_test();
+ return 0;
+}
+
+SEC("perf_event")
+__success
+int BPF_PROG(cgrp_kfunc_perf_event)
+{
+ cgrp_kfunc_load_test();
+ return 0;
+}
+
+/******************
+ * cpumask kfuncs *
+ ******************/
+
+static void cpumask_kfunc_load_test(void)
+{
+ struct bpf_cpumask *alloc, *ref;
+
+ alloc = bpf_cpumask_create();
+ if (!alloc)
+ return;
+
+ ref = bpf_cpumask_acquire(alloc);
+ bpf_cpumask_set_cpu(0, alloc);
+ bpf_cpumask_test_cpu(0, (const struct cpumask *)ref);
+
+ bpf_cpumask_release(ref);
+ bpf_cpumask_release(alloc);
+}
+
+SEC("raw_tp")
+__failure __msg("calling kernel function")
+int BPF_PROG(cpumask_kfunc_raw_tp)
+{
+ cpumask_kfunc_load_test();
+ return 0;
+}
+
+SEC("syscall")
+__success
+int BPF_PROG(cpumask_kfunc_syscall)
+{
+ cpumask_kfunc_load_test();
+ return 0;
+}
+
+SEC("tracepoint")
+__success
+int BPF_PROG(cpumask_kfunc_tracepoint)
+{
+ cpumask_kfunc_load_test();
+ return 0;
+}
+
+SEC("perf_event")
+__success
+int BPF_PROG(cpumask_kfunc_perf_event)
+{
+ cpumask_kfunc_load_test();
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_ld_ind.c b/tools/testing/selftests/bpf/progs/verifier_ld_ind.c
new file mode 100644
index 000000000000..c925ba9a2e74
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_ld_ind.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/ld_ind.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("ld_ind: check calling conv, r1")
+__failure __msg("R1 !read_ok")
+__failure_unpriv
+__naked void ind_check_calling_conv_r1(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 1; \
+ .8byte %[ld_ind]; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("ld_ind: check calling conv, r2")
+__failure __msg("R2 !read_ok")
+__failure_unpriv
+__naked void ind_check_calling_conv_r2(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r2 = 1; \
+ .8byte %[ld_ind]; \
+ r0 = r2; \
+ exit; \
+" :
+ : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("ld_ind: check calling conv, r3")
+__failure __msg("R3 !read_ok")
+__failure_unpriv
+__naked void ind_check_calling_conv_r3(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r3 = 1; \
+ .8byte %[ld_ind]; \
+ r0 = r3; \
+ exit; \
+" :
+ : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("ld_ind: check calling conv, r4")
+__failure __msg("R4 !read_ok")
+__failure_unpriv
+__naked void ind_check_calling_conv_r4(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r4 = 1; \
+ .8byte %[ld_ind]; \
+ r0 = r4; \
+ exit; \
+" :
+ : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("ld_ind: check calling conv, r5")
+__failure __msg("R5 !read_ok")
+__failure_unpriv
+__naked void ind_check_calling_conv_r5(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r5 = 1; \
+ .8byte %[ld_ind]; \
+ r0 = r5; \
+ exit; \
+" :
+ : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("ld_ind: check calling conv, r7")
+__success __success_unpriv __retval(1)
+__naked void ind_check_calling_conv_r7(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r7 = 1; \
+ .8byte %[ld_ind]; \
+ r0 = r7; \
+ exit; \
+" :
+ : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_ldsx.c b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
new file mode 100644
index 000000000000..52edee41caf6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+ defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
+
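+/*
+ * LDSX (sign-extending load) belongs to the cpu v4 instruction set, so
+ * these tests are only compiled when the target architecture is expected
+ * to JIT it and Clang is new enough (>= 18) to emit it; otherwise the
+ * dummy test at the bottom of this file is used instead.
+ */
+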
+SEC("socket")
+__description("LDSX, S8")
+__success __success_unpriv __retval(-2)
+__naked void ldsx_s8(void)
+{
+ asm volatile (
+ "r1 = 0x3fe;"
+ "*(u64 *)(r10 - 8) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r0 = *(s8 *)(r10 - 8);"
+#else
+ "r0 = *(s8 *)(r10 - 1);"
+#endif
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__description("LDSX, S16")
+__success __success_unpriv __retval(-2)
+__naked void ldsx_s16(void)
+{
+ asm volatile (
+ "r1 = 0x3fffe;"
+ "*(u64 *)(r10 - 8) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r0 = *(s16 *)(r10 - 8);"
+#else
+ "r0 = *(s16 *)(r10 - 2);"
+#endif
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__description("LDSX, S32")
+__success __success_unpriv __retval(-1)
+__naked void ldsx_s32(void)
+{
+ asm volatile (
+ "r1 = 0xfffffffe;"
+ "*(u64 *)(r10 - 8) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r0 = *(s32 *)(r10 - 8);"
+#else
+ "r0 = *(s32 *)(r10 - 4);"
+#endif
+ "r0 >>= 1;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__description("LDSX, S8 range checking, privileged")
+__log_level(2) __success __retval(1)
+__msg("R1_w=scalar(smin=smin32=-128,smax=smax32=127)")
+__naked void ldsx_s8_range_priv(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r1 = *(s8 *)(r10 - 8);"
+#else
+ "r1 = *(s8 *)(r10 - 1);"
+#endif
+ /* r1 with s8 range */
+ "if r1 s> 0x7f goto l0_%=;"
+ "if r1 s< -0x80 goto l0_%=;"
+ "r0 = 1;"
+"l1_%=:"
+ "exit;"
+"l0_%=:"
+ "r0 = 2;"
+ "goto l1_%=;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("LDSX, S16 range checking")
+__success __success_unpriv __retval(1)
+__naked void ldsx_s16_range(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r1 = *(s16 *)(r10 - 8);"
+#else
+ "r1 = *(s16 *)(r10 - 2);"
+#endif
+ /* r1 with s16 range */
+ "if r1 s> 0x7fff goto l0_%=;"
+ "if r1 s< -0x8000 goto l0_%=;"
+ "r0 = 1;"
+"l1_%=:"
+ "exit;"
+"l0_%=:"
+ "r0 = 2;"
+ "goto l1_%=;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("LDSX, S32 range checking")
+__success __success_unpriv __retval(1)
+__naked void ldsx_s32_range(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r1 = *(s32 *)(r10 - 8);"
+#else
+ "r1 = *(s32 *)(r10 - 4);"
+#endif
+	/* r1 with s32 range */
+ "if r1 s> 0x7fffFFFF goto l0_%=;"
+ "if r1 s< -0x80000000 goto l0_%=;"
+ "r0 = 1;"
+"l1_%=:"
+ "exit;"
+"l0_%=:"
+ "r0 = 2;"
+ "goto l1_%=;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("LDSX, xdp s32 xdp_md->data")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_1(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[xdp_md_data]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("LDSX, xdp s32 xdp_md->data_end")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_2(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[xdp_md_data_end]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("LDSX, xdp s32 xdp_md->data_meta")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_3(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[xdp_md_data_meta]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("tcx/ingress")
+__description("LDSX, tcx s32 __sk_buff->data")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_4(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[sk_buff_data]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(sk_buff_data, offsetof(struct __sk_buff, data))
+ : __clobber_all);
+}
+
+SEC("tcx/ingress")
+__description("LDSX, tcx s32 __sk_buff->data_end")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_5(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[sk_buff_data_end]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tcx/ingress")
+__description("LDSX, tcx s32 __sk_buff->data_meta")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_6(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[sk_buff_data_meta]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(sk_buff_data_meta, offsetof(struct __sk_buff, data_meta))
+ : __clobber_all);
+}
+
+SEC("flow_dissector")
+__description("LDSX, flow_dissector s32 __sk_buff->data")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_7(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[sk_buff_data]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(sk_buff_data, offsetof(struct __sk_buff, data))
+ : __clobber_all);
+}
+
+SEC("flow_dissector")
+__description("LDSX, flow_dissector s32 __sk_buff->data_end")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_8(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[sk_buff_data_end]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+#else
+
+SEC("socket")
+__description("cpuv4 is not supported by compiler or jit, use a dummy test")
+__success
+int dummy_test(void)
+{
+ return 0;
+}
+
+#endif
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_leak_ptr.c b/tools/testing/selftests/bpf/progs/verifier_leak_ptr.c
new file mode 100644
index 000000000000..d153fbe50055
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_leak_ptr.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/leak_ptr.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
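+/*
+ * "Leaking" a pointer here means storing a kernel address (a map or
+ * stack pointer) into memory that user space may later read, i.e. the
+ * skb ctx or a map value.  Privileged loads reject only the atomic
+ * add-into-ctx cases; unprivileged loads additionally reject every
+ * variant with an "R* leaks addr" message.
+ */
+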
+SEC("socket")
+__description("leak pointer into ctx 1")
+__failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed")
+__failure_unpriv __msg_unpriv("R2 leaks addr into mem")
+__naked void leak_pointer_into_ctx_1(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ *(u64*)(r1 + %[__sk_buff_cb_0]) = r0; \
+ r2 = %[map_hash_8b] ll; \
+ lock *(u64 *)(r1 + %[__sk_buff_cb_0]) += r2; \
+ exit; \
+" :
+ : __imm_addr(map_hash_8b),
+ __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("leak pointer into ctx 2")
+__failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed")
+__failure_unpriv __msg_unpriv("R10 leaks addr into mem")
+__naked void leak_pointer_into_ctx_2(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ *(u64*)(r1 + %[__sk_buff_cb_0]) = r0; \
+ lock *(u64 *)(r1 + %[__sk_buff_cb_0]) += r10; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("leak pointer into ctx 3")
+__success __failure_unpriv __msg_unpriv("R2 leaks addr into ctx")
+__retval(0)
+__naked void leak_pointer_into_ctx_3(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ r2 = %[map_hash_8b] ll; \
+ *(u64*)(r1 + %[__sk_buff_cb_0]) = r2; \
+ exit; \
+" :
+ : __imm_addr(map_hash_8b),
+ __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("leak pointer into map val")
+__success __failure_unpriv __msg_unpriv("R6 leaks addr into mem")
+__retval(0)
+__naked void leak_pointer_into_map_val(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r3 = 0; \
+ *(u64*)(r0 + 0) = r3; \
+ lock *(u64 *)(r0 + 0) += r6; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c b/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
new file mode 100644
index 000000000000..8f755d2464cf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("scalars: find linked scalars")
+__failure
+__msg("math between fp pointer and 2147483647 is not allowed")
+__naked void scalars(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ r1 = 0x80000001 ll; \
+ r1 /= 1; \
+ r2 = r1; \
+ r4 = r1; \
+ w2 += 0x7FFFFFFF; \
+ w4 += 0; \
+ if r2 == 0 goto l1; \
+ exit; \
+l1: \
+ r4 >>= 63; \
+ r3 = 1; \
+ r3 -= r4; \
+ r3 *= 0x7FFFFFFF; \
+ r3 += r10; \
+ *(u8*)(r3 - 1) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_load_acquire.c b/tools/testing/selftests/bpf/progs/verifier_load_acquire.c
new file mode 100644
index 000000000000..74f4f19c10b8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_load_acquire.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Google LLC. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+
+#ifdef CAN_USE_LOAD_ACQ_STORE_REL
+
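+/*
+ * Rather than relying on assembler support for the new load-acquire
+ * mnemonics, each test emits the raw encoding via
+ * BPF_ATOMIC_OP(..., BPF_LOAD_ACQ, ...) and ".8byte %[load_acquire_insn]";
+ * the C-style comment next to each .8byte spells out the intended
+ * load-acquire operation.
+ */
+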
+SEC("socket")
+__description("load-acquire, 8-bit")
+__success __success_unpriv __retval(0)
+__naked void load_acquire_8(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "w1 = 0xfe;"
+ "*(u8 *)(r10 - 1) = w1;"
+ ".8byte %[load_acquire_insn];" // w2 = load_acquire((u8 *)(r10 - 1));
+ "if r2 == r1 goto 1f;"
+ "r0 = 1;"
+"1:"
+ "exit;"
+ :
+ : __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -1))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("load-acquire, 16-bit")
+__success __success_unpriv __retval(0)
+__naked void load_acquire_16(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "w1 = 0xfedc;"
+ "*(u16 *)(r10 - 2) = w1;"
+ ".8byte %[load_acquire_insn];" // w2 = load_acquire((u16 *)(r10 - 2));
+ "if r2 == r1 goto 1f;"
+ "r0 = 1;"
+"1:"
+ "exit;"
+ :
+ : __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_H, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -2))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("load-acquire, 32-bit")
+__success __success_unpriv __retval(0)
+__naked void load_acquire_32(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "w1 = 0xfedcba09;"
+ "*(u32 *)(r10 - 4) = w1;"
+ ".8byte %[load_acquire_insn];" // w2 = load_acquire((u32 *)(r10 - 4));
+ "if r2 == r1 goto 1f;"
+ "r0 = 1;"
+"1:"
+ "exit;"
+ :
+ : __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_W, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -4))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("load-acquire, 64-bit")
+__success __success_unpriv __retval(0)
+__naked void load_acquire_64(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "r1 = 0xfedcba0987654321 ll;"
+ "*(u64 *)(r10 - 8) = r1;"
+ ".8byte %[load_acquire_insn];" // r2 = load_acquire((u64 *)(r10 - 8));
+ "if r2 == r1 goto 1f;"
+ "r0 = 1;"
+"1:"
+ "exit;"
+ :
+ : __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -8))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("load-acquire with uninitialized src_reg")
+__failure __failure_unpriv __msg("R2 !read_ok")
+__naked void load_acquire_with_uninitialized_src_reg(void)
+{
+ asm volatile (
+ ".8byte %[load_acquire_insn];" // r0 = load_acquire((u64 *)(r2 + 0));
+ "exit;"
+ :
+ : __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2, 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("load-acquire with non-pointer src_reg")
+__failure __failure_unpriv __msg("R1 invalid mem access 'scalar'")
+__naked void load_acquire_with_non_pointer_src_reg(void)
+{
+ asm volatile (
+ "r1 = 0;"
+ ".8byte %[load_acquire_insn];" // r0 = load_acquire((u64 *)(r1 + 0));
+ "exit;"
+ :
+ : __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_1, 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("misaligned load-acquire")
+__failure __failure_unpriv __msg("misaligned stack access off")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void load_acquire_misaligned(void)
+{
+ asm volatile (
+ "r1 = 0;"
+ "*(u64 *)(r10 - 8) = r1;"
+ ".8byte %[load_acquire_insn];" // w0 = load_acquire((u32 *)(r10 - 5));
+ "exit;"
+ :
+ : __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_W, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_10, -5))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("load-acquire from ctx pointer")
+__failure __failure_unpriv __msg("BPF_ATOMIC loads from R1 ctx is not allowed")
+__naked void load_acquire_from_ctx_pointer(void)
+{
+ asm volatile (
+ ".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r1 + 0));
+ "exit;"
+ :
+ : __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_1, 0))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("load-acquire from pkt pointer")
+__failure __msg("BPF_ATOMIC loads from R2 pkt is not allowed")
+__naked void load_acquire_from_pkt_pointer(void)
+{
+ asm volatile (
+ "r2 = *(u32 *)(r1 + %[xdp_md_data]);"
+ "r3 = *(u32 *)(r1 + %[xdp_md_data_end]);"
+ "r1 = r2;"
+ "r1 += 8;"
+ "if r1 >= r3 goto l0_%=;"
+ ".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r2 + 0));
+"l0_%=: r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
+ __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2, 0))
+ : __clobber_all);
+}
+
+SEC("flow_dissector")
+__description("load-acquire from flow_keys pointer")
+__failure __msg("BPF_ATOMIC loads from R2 flow_keys is not allowed")
+__naked void load_acquire_from_flow_keys_pointer(void)
+{
+ asm volatile (
+ "r2 = *(u64 *)(r1 + %[__sk_buff_flow_keys]);"
+ ".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r2 + 0));
+ "exit;"
+ :
+ : __imm_const(__sk_buff_flow_keys,
+ offsetof(struct __sk_buff, flow_keys)),
+ __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2, 0))
+ : __clobber_all);
+}
+
+SEC("sk_reuseport")
+__description("load-acquire from sock pointer")
+__failure __msg("BPF_ATOMIC loads from R2 sock is not allowed")
+__naked void load_acquire_from_sock_pointer(void)
+{
+ asm volatile (
+ "r2 = *(u64 *)(r1 + %[sk_reuseport_md_sk]);"
+ // w0 = load_acquire((u8 *)(r2 + offsetof(struct bpf_sock, family)));
+ ".8byte %[load_acquire_insn];"
+ "exit;"
+ :
+ : __imm_const(sk_reuseport_md_sk, offsetof(struct sk_reuseport_md, sk)),
+ __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2,
+ offsetof(struct bpf_sock, family)))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("load-acquire with invalid register R15")
+__failure __failure_unpriv __msg("R15 is invalid")
+__naked void load_acquire_with_invalid_reg(void)
+{
+ asm volatile (
+ ".8byte %[load_acquire_insn];" // r0 = load_acquire((u64 *)(r15 + 0));
+ "exit;"
+ :
+ : __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, 15 /* invalid reg */, 0))
+ : __clobber_all);
+}
+
+#else /* CAN_USE_LOAD_ACQ_STORE_REL */
+
+SEC("socket")
+__description("Clang version < 18, ENABLE_ATOMICS_TESTS not defined, and/or JIT doesn't support load-acquire, use a dummy test")
+__success
+int dummy_test(void)
+{
+ return 0;
+}
+
+#endif /* CAN_USE_LOAD_ACQ_STORE_REL */
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_loops1.c b/tools/testing/selftests/bpf/progs/verifier_loops1.c
new file mode 100644
index 000000000000..e07b43b78fd2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_loops1.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/loops1.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
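+/*
+ * Roughly: loops whose counter provably reaches the exit condition are
+ * accepted, back-edges that cannot make progress are rejected with
+ * "loop detected" (or "back-edge" for unprivileged socket programs),
+ * and iteration counts beyond the verifier's budget show up as
+ * "program is too large".
+ */
+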
+SEC("xdp")
+__description("bounded loop, count to 4")
+__success __retval(4)
+__naked void bounded_loop_count_to_4(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+l0_%=: r0 += 1; \
+ if r0 < 4 goto l0_%=; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tracepoint")
+__description("bounded loop, count to 20")
+__success
+__naked void bounded_loop_count_to_20(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+l0_%=: r0 += 3; \
+ if r0 < 20 goto l0_%=; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tracepoint")
+__description("bounded loop, count from positive unknown to 4")
+__success
+__naked void from_positive_unknown_to_4(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ if r0 s< 0 goto l0_%=; \
+l1_%=: r0 += 1; \
+ if r0 < 4 goto l1_%=; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("bounded loop, count from totally unknown to 4")
+__success
+__naked void from_totally_unknown_to_4(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+l0_%=: r0 += 1; \
+ if r0 < 4 goto l0_%=; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("bounded loop, count to 4 with equality")
+__success
+__naked void count_to_4_with_equality(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+l0_%=: r0 += 1; \
+ if r0 != 4 goto l0_%=; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("bounded loop, start in the middle")
+__success
+__failure_unpriv __msg_unpriv("back-edge")
+__naked void loop_start_in_the_middle(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ goto l0_%=; \
+l1_%=: r0 += 1; \
+l0_%=: if r0 < 4 goto l1_%=; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("xdp")
+__description("bounded loop containing a forward jump")
+__success __retval(4)
+__naked void loop_containing_a_forward_jump(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+l1_%=: r0 += 1; \
+ if r0 == r0 goto l0_%=; \
+l0_%=: if r0 < 4 goto l1_%=; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tracepoint")
+__description("bounded loop that jumps out rather than in")
+__success
+__naked void jumps_out_rather_than_in(void)
+{
+ asm volatile (" \
+ r6 = 0; \
+l1_%=: r6 += 1; \
+ if r6 > 10000 goto l0_%=; \
+ call %[bpf_get_prandom_u32]; \
+ goto l1_%=; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("infinite loop after a conditional jump")
+__failure __msg("program is too large")
+__naked void loop_after_a_conditional_jump(void)
+{
+ asm volatile (" \
+ r0 = 5; \
+ if r0 < 4 goto l0_%=; \
+l1_%=: r0 += 1; \
+ goto l1_%=; \
+l0_%=: exit; \
+" ::: __clobber_all);
+}
+
+SEC("tracepoint")
+__description("bounded recursion")
+__failure
+/* verifier limitation in detecting max stack depth */
+__msg("the call stack of 8 frames is too deep !")
+__naked void bounded_recursion(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ call bounded_recursion__1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void bounded_recursion__1(void)
+{
+ asm volatile (" \
+ r1 += 1; \
+ r0 = r1; \
+ if r1 < 4 goto l0_%=; \
+ exit; \
+l0_%=: call bounded_recursion__1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tracepoint")
+__description("infinite loop in two jumps")
+__failure __msg("loop detected")
+__naked void infinite_loop_in_two_jumps(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+l1_%=: goto l0_%=; \
+l0_%=: if r0 < 4 goto l1_%=; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tracepoint")
+__description("infinite loop: three-jump trick")
+__failure __msg("loop detected")
+__naked void infinite_loop_three_jump_trick(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+l2_%=: r0 += 1; \
+ r0 &= 1; \
+ if r0 < 2 goto l0_%=; \
+ exit; \
+l0_%=: r0 += 1; \
+ r0 &= 1; \
+ if r0 < 2 goto l1_%=; \
+ exit; \
+l1_%=: r0 += 1; \
+ r0 &= 1; \
+ if r0 < 2 goto l2_%=; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("xdp")
+__description("not-taken loop with back jump to 1st insn")
+__success __retval(123)
+__naked void back_jump_to_1st_insn_1(void)
+{
+ asm volatile (" \
+l0_%=: r0 = 123; \
+ if r0 == 4 goto l0_%=; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("xdp")
+__description("taken loop with back jump to 1st insn")
+__success __retval(55)
+__naked void back_jump_to_1st_insn_2(void)
+{
+ asm volatile (" \
+ r1 = 10; \
+ r2 = 0; \
+ call back_jump_to_1st_insn_2__1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void back_jump_to_1st_insn_2__1(void)
+{
+ asm volatile (" \
+l0_%=: r2 += r1; \
+ r1 -= 1; \
+ if r1 != 0 goto l0_%=; \
+ r0 = r2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("xdp")
+__description("taken loop with back jump to 1st insn, 2")
+__success __retval(55)
+__naked void jump_to_1st_insn_2(void)
+{
+ asm volatile (" \
+ r1 = 10; \
+ r2 = 0; \
+ call jump_to_1st_insn_2__1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void jump_to_1st_insn_2__1(void)
+{
+ asm volatile (" \
+l0_%=: r2 += r1; \
+ r1 -= 1; \
+ if w1 != 0 goto l0_%=; \
+ r0 = r2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("xdp")
+__success
+__naked void not_an_inifinite_loop(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0xff; \
+ *(u64 *)(r10 - 8) = r0; \
+ r0 = 0; \
+loop_%=: \
+ r0 = *(u64 *)(r10 - 8); \
+ if r0 > 10 goto exit_%=; \
+ r0 += 1; \
+ *(u64 *)(r10 - 8) = r0; \
+ r0 = 0; \
+ goto loop_%=; \
+exit_%=: \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_lsm.c b/tools/testing/selftests/bpf/progs/verifier_lsm.c
new file mode 100644
index 000000000000..32e5e779cb96
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_lsm.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
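+/*
+ * The verifier checks LSM program return values against the hook's
+ * expected return type: hooks returning an errno must stay in
+ * [-4095, 0], bool hooks in [0, 1], and void hooks may return anything.
+ * Attaching to a hook on the disabled list is rejected with
+ * "points to disabled hook".
+ */
+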
+SEC("lsm/file_alloc_security")
+__description("lsm bpf prog with -4095~0 retval. test 1")
+__success
+__naked int errno_zero_retval_test1(void *ctx)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/file_alloc_security")
+__description("lsm bpf prog with -4095~0 retval. test 2")
+__success
+__naked int errno_zero_retval_test2(void *ctx)
+{
+ asm volatile (
+ "r0 = -4095;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/file_mprotect")
+__description("lsm bpf prog with -4095~0 retval. test 4")
+__failure __msg("R0 has smin=-4096 smax=-4096 should have been in [-4095, 0]")
+__naked int errno_zero_retval_test4(void *ctx)
+{
+ asm volatile (
+ "r0 = -4096;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/file_mprotect")
+__description("lsm bpf prog with -4095~0 retval. test 5")
+__failure __msg("R0 has smin=4096 smax=4096 should have been in [-4095, 0]")
+__naked int errno_zero_retval_test5(void *ctx)
+{
+ asm volatile (
+ "r0 = 4096;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/file_mprotect")
+__description("lsm bpf prog with -4095~0 retval. test 6")
+__failure __msg("R0 has smin=1 smax=1 should have been in [-4095, 0]")
+__naked int errno_zero_retval_test6(void *ctx)
+{
+ asm volatile (
+ "r0 = 1;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/audit_rule_known")
+__description("lsm bpf prog with bool retval. test 1")
+__success
+__naked int bool_retval_test1(void *ctx)
+{
+ asm volatile (
+ "r0 = 1;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/audit_rule_known")
+__description("lsm bpf prog with bool retval. test 2")
+__success
+__success
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/audit_rule_known")
+__description("lsm bpf prog with bool retval. test 3")
+__failure __msg("R0 has smin=-1 smax=-1 should have been in [0, 1]")
+__naked int bool_retval_test3(void *ctx)
+{
+ asm volatile (
+ "r0 = -1;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/audit_rule_known")
+__description("lsm bpf prog with bool retval. test 4")
+__failure __msg("R0 has smin=2 smax=2 should have been in [0, 1]")
+__naked int bool_retval_test4(void *ctx)
+{
+ asm volatile (
+ "r0 = 2;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/file_free_security")
+__success
+__description("lsm bpf prog with void retval. test 1")
+__naked int void_retval_test1(void *ctx)
+{
+ asm volatile (
+ "r0 = -4096;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/file_free_security")
+__success
+__description("lsm bpf prog with void retval. test 2")
+__naked int void_retval_test2(void *ctx)
+{
+ asm volatile (
+ "r0 = 4096;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/getprocattr")
+__description("lsm disabled hook: getprocattr")
+__failure __msg("points to disabled hook")
+__naked int disabled_hook_test1(void *ctx)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/setprocattr")
+__description("lsm disabled hook: setprocattr")
+__failure __msg("points to disabled hook")
+__naked int disabled_hook_test2(void *ctx)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/ismaclabel")
+__description("lsm disabled hook: ismaclabel")
+__failure __msg("points to disabled hook")
+__naked int disabled_hook_test3(void *ctx)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_lwt.c b/tools/testing/selftests/bpf/progs/verifier_lwt.c
new file mode 100644
index 000000000000..5ab746307309
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_lwt.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/lwt.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
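+/*
+ * Light-weight tunnel programs may read packet data directly at every
+ * hook, but only LWT_XMIT may write to the packet or grow headroom;
+ * LWT_IN and LWT_OUT writes fail with "cannot write into packet", and
+ * skb->tc_classid is not accessible from lwt programs at all.
+ */
+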
+SEC("lwt_in")
+__description("invalid direct packet write for LWT_IN")
+__failure __msg("cannot write into packet")
+__naked void packet_write_for_lwt_in(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ *(u8*)(r2 + 0) = r2; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("lwt_out")
+__description("invalid direct packet write for LWT_OUT")
+__failure __msg("cannot write into packet")
+__naked void packet_write_for_lwt_out(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ *(u8*)(r2 + 0) = r2; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("lwt_xmit")
+__description("direct packet write for LWT_XMIT")
+__success __retval(0)
+__naked void packet_write_for_lwt_xmit(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ *(u8*)(r2 + 0) = r2; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("lwt_in")
+__description("direct packet read for LWT_IN")
+__success __retval(0)
+__naked void packet_read_for_lwt_in(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("lwt_out")
+__description("direct packet read for LWT_OUT")
+__success __retval(0)
+__naked void packet_read_for_lwt_out(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("lwt_xmit")
+__description("direct packet read for LWT_XMIT")
+__success __retval(0)
+__naked void packet_read_for_lwt_xmit(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("lwt_xmit")
+__description("overlapping checks for direct packet access")
+__success __retval(0)
+__naked void checks_for_direct_packet_access(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r1 = r2; \
+ r1 += 6; \
+ if r1 > r3 goto l0_%=; \
+ r0 = *(u16*)(r2 + 6); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("lwt_xmit")
+__description("make headroom for LWT_XMIT")
+__success __retval(0)
+__naked void make_headroom_for_lwt_xmit(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r2 = 34; \
+ r3 = 0; \
+ call %[bpf_skb_change_head]; \
+ /* split for s390 to succeed */ \
+ r1 = r6; \
+ r2 = 42; \
+ r3 = 0; \
+ call %[bpf_skb_change_head]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_skb_change_head)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid access of tc_classid for LWT_IN")
+__failure __msg("invalid bpf_context access")
+__failure_unpriv
+__naked void tc_classid_for_lwt_in(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \
+ exit; \
+" :
+ : __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid access of tc_classid for LWT_OUT")
+__failure __msg("invalid bpf_context access")
+__failure_unpriv
+__naked void tc_classid_for_lwt_out(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \
+ exit; \
+" :
+ : __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid access of tc_classid for LWT_XMIT")
+__failure __msg("invalid bpf_context access")
+__failure_unpriv
+__naked void tc_classid_for_lwt_xmit(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \
+ exit; \
+" :
+ : __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
+ : __clobber_all);
+}
+
+SEC("lwt_in")
+__description("check skb->tc_classid half load not permitted for lwt prog")
+__failure __msg("invalid bpf_context access")
+__naked void not_permitted_for_lwt_prog(void)
+{
+ asm volatile (
+ "r0 = 0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r0 = *(u16*)(r1 + %[__sk_buff_tc_classid]);"
+#else
+ "r0 = *(u16*)(r1 + %[__imm_0]);"
+#endif
+ "exit;"
+ :
+ : __imm_const(__imm_0, offsetof(struct __sk_buff, tc_classid) + 2),
+ __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c
new file mode 100644
index 000000000000..16b761e510f0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/map_in_map.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+ });
+} map_in_map SEC(".maps");
+
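+/*
+ * A lookup in the outer map returns a pointer to the inner map (or
+ * NULL), which, after a NULL check, can itself be passed as the map
+ * argument of a second bpf_map_lookup_elem(); the tests below cover
+ * missing NULL checks and arithmetic on the inner map pointer.
+ */
+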
+SEC("socket")
+__description("map in map access")
+__success __success_unpriv __retval(0)
+__naked void map_in_map_access(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_in_map] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = r0; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_in_map)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("map in map state pruning")
+__success __msg("processed 15 insns")
+__log_level(2) __retval(0) __flag(BPF_F_TEST_STATE_FREQ)
+__naked void map_in_map_state_pruning(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r6 = r10; \
+ r6 += -4; \
+ r2 = r6; \
+ r1 = %[map_in_map] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r2 = r6; \
+ r1 = r0; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l1_%=; \
+ r2 = r6; \
+ r1 = %[map_in_map] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l2_%=; \
+ exit; \
+l2_%=: r2 = r6; \
+ r1 = r0; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l1_%=; \
+ exit; \
+l1_%=: r0 = *(u32*)(r0 + 0); \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_in_map)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid inner map pointer")
+__failure __msg("R1 pointer arithmetic on map_ptr prohibited")
+__failure_unpriv
+__naked void invalid_inner_map_pointer(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_in_map] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = r0; \
+ r1 += 8; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_in_map)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("forgot null checking on the inner map pointer")
+__failure __msg("R1 type=map_value_or_null expected=map_ptr")
+__failure_unpriv
+__naked void on_the_inner_map_pointer(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_in_map] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = r0; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_in_map)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map_ptr is never null")
+__success
+__naked void map_ptr_is_never_null(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ r1 = %[map_in_map] ll; \
+ if r1 != 0 goto l0_%=; \
+ r10 = 42; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_in_map)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map_ptr is never null inner")
+__success
+__naked void map_ptr_is_never_null_inner(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_in_map] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ if r0 != 0 goto l0_%=; \
+ r10 = 42; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_in_map)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map_ptr is never null inner spill fill")
+__success
+__naked void map_ptr_is_never_null_inner_spill_fill(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_in_map] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: *(u64 *)(r10 -16) = r0; \
+ r1 = *(u64 *)(r10 -16); \
+ if r1 == 0 goto l1_%=; \
+ exit; \
+l1_%=: r10 = 42; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_in_map)
+ : __clobber_all);
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 64 * 1024);
+ });
+} rb_in_map SEC(".maps");
+
+struct rb_ctx {
+ void *rb;
+ struct bpf_dynptr dptr;
+};
+
+static __always_inline struct rb_ctx __rb_event_reserve(__u32 sz)
+{
+ struct rb_ctx rb_ctx = {};
+ void *rb;
+ __u32 cpu = bpf_get_smp_processor_id();
+ __u32 rb_slot = cpu & 1;
+
+ rb = bpf_map_lookup_elem(&rb_in_map, &rb_slot);
+ if (!rb)
+ return rb_ctx;
+
+ rb_ctx.rb = rb;
+ bpf_ringbuf_reserve_dynptr(rb, sz, 0, &rb_ctx.dptr);
+
+ return rb_ctx;
+}
+
+static __noinline void __rb_event_submit(struct rb_ctx *ctx)
+{
+ if (!ctx->rb)
+ return;
+
+ /* If the verifier (incorrectly) concludes that ctx->rb can be
+ * NULL at this point, we'll get "BPF_EXIT instruction in main
+ * prog would lead to reference leak" error
+ */
+ bpf_ringbuf_submit_dynptr(&ctx->dptr, 0);
+}
+
+SEC("socket")
+int map_ptr_is_never_null_rb(void *ctx)
+{
+ struct rb_ctx event_ctx = __rb_event_reserve(256);
+ __rb_event_submit(&event_ctx);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_map_ptr.c b/tools/testing/selftests/bpf/progs/verifier_map_ptr.c
new file mode 100644
index 000000000000..11a079145966
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_map_ptr.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/map_ptr.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
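+/*
+ * Direct loads through a map pointer are restricted: only reads that
+ * line up with known struct bpf_map fields are allowed, and only with
+ * CAP_PERFMON / CAP_SYS_ADMIN; negative offsets, writes, and reads that
+ * do not match a field boundary are rejected.
+ */
+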
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct test_val);
+} map_array_48b SEC(".maps");
+
+struct other_val {
+ long long foo;
+ long long bar;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct other_val);
+} map_hash_16b SEC(".maps");
+
+SEC("socket")
+__description("bpf_map_ptr: read with negative offset rejected")
+__failure __msg("R1 is bpf_array invalid negative access: off=-8")
+__failure_unpriv
+__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN")
+__naked void read_with_negative_offset_rejected(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 = %[map_array_48b] ll; \
+ r6 = *(u64*)(r1 - 8); \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bpf_map_ptr: write rejected")
+__failure __msg("only read from bpf_array is supported")
+__failure_unpriv
+__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN")
+__naked void bpf_map_ptr_write_rejected(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ *(u64*)(r10 - 8) = r0; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ *(u64*)(r1 + 0) = r2; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bpf_map_ptr: read non-existent field rejected")
+__failure
+__msg("cannot access ptr member ops with moff 0 in struct bpf_map with off 1 size 4")
+__failure_unpriv
+__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void read_non_existent_field_rejected(void)
+{
+ asm volatile (" \
+ r6 = 0; \
+ r1 = %[map_array_48b] ll; \
+ r6 = *(u32*)(r1 + 1); \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bpf_map_ptr: read ops field accepted")
+__success __failure_unpriv
+__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN")
+__retval(1)
+__naked void ptr_read_ops_field_accepted(void)
+{
+ asm volatile (" \
+ r6 = 0; \
+ r1 = %[map_array_48b] ll; \
+ r6 = *(u64*)(r1 + 0); \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bpf_map_ptr: r = 0, map_ptr = map_ptr + r")
+__success __failure_unpriv
+__msg_unpriv("R1 has pointer with unsupported alu operation")
+__retval(0)
+__naked void map_ptr_map_ptr_r(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ *(u64*)(r10 - 8) = r0; \
+ r2 = r10; \
+ r2 += -8; \
+ r0 = 0; \
+ r1 = %[map_hash_16b] ll; \
+ r1 += r0; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_16b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("bpf_map_ptr: r = 0, r = r + map_ptr")
+__success __failure_unpriv
+__msg_unpriv("R0 has pointer with unsupported alu operation")
+__retval(0)
+__naked void _0_r_r_map_ptr(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ *(u64*)(r10 - 8) = r0; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ r0 = %[map_hash_16b] ll; \
+ r1 += r0; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_16b)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c b/tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c
new file mode 100644
index 000000000000..c5a7c1ddc562
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/map_ptr_mixing.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct test_val);
+} map_array_48b SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct test_val);
+} map_hash_48b SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+ });
+} map_in_map SEC(".maps");
+
+void dummy_prog_42_socket(void);
+void dummy_prog_24_socket(void);
+void dummy_prog_loop1_socket(void);
+void dummy_prog_loop2_socket(void);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 4);
+ __uint(key_size, sizeof(int));
+ __array(values, void (void));
+} map_prog1_socket SEC(".maps") = {
+ .values = {
+ [0] = (void *)&dummy_prog_42_socket,
+ [1] = (void *)&dummy_prog_loop1_socket,
+ [2] = (void *)&dummy_prog_24_socket,
+ },
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 8);
+ __uint(key_size, sizeof(int));
+ __array(values, void (void));
+} map_prog2_socket SEC(".maps") = {
+ .values = {
+ [1] = (void *)&dummy_prog_loop2_socket,
+ [2] = (void *)&dummy_prog_24_socket,
+ [7] = (void *)&dummy_prog_42_socket,
+ },
+};
+
+SEC("socket")
+__auxiliary __auxiliary_unpriv
+__naked void dummy_prog_42_socket(void)
+{
+ asm volatile ("r0 = 42; exit;");
+}
+
+SEC("socket")
+__auxiliary __auxiliary_unpriv
+__naked void dummy_prog_24_socket(void)
+{
+ asm volatile ("r0 = 24; exit;");
+}
+
+SEC("socket")
+__auxiliary __auxiliary_unpriv
+__naked void dummy_prog_loop1_socket(void)
+{
+ asm volatile (" \
+ r3 = 1; \
+ r2 = %[map_prog1_socket] ll; \
+ call %[bpf_tail_call]; \
+ r0 = 41; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket)
+ : __clobber_all);
+}
+
+SEC("socket")
+__auxiliary __auxiliary_unpriv
+__naked void dummy_prog_loop2_socket(void)
+{
+ asm volatile (" \
+ r3 = 1; \
+ r2 = %[map_prog2_socket] ll; \
+ call %[bpf_tail_call]; \
+ r0 = 41; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog2_socket)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("calls: two calls returning different map pointers for lookup (hash, array)")
+__success __retval(1)
+__naked void pointers_for_lookup_hash_array(void)
+{
+ asm volatile (" \
+ /* main prog */ \
+ if r1 != 0 goto l0_%=; \
+ call pointers_for_lookup_hash_array__1; \
+ goto l1_%=; \
+l0_%=: call pointers_for_lookup_hash_array__2; \
+l1_%=: r1 = r0; \
+ r2 = 0; \
+ *(u64*)(r10 - 8) = r2; \
+ r2 = r10; \
+ r2 += -8; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+ r0 = 1; \
+l2_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void pointers_for_lookup_hash_array__1(void)
+{
+ asm volatile (" \
+ r0 = %[map_hash_48b] ll; \
+ exit; \
+" :
+ : __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void pointers_for_lookup_hash_array__2(void)
+{
+ asm volatile (" \
+ r0 = %[map_array_48b] ll; \
+ exit; \
+" :
+ : __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("calls: two calls returning different map pointers for lookup (hash, map in map)")
+__failure __msg("only read from bpf_array is supported")
+__naked void lookup_hash_map_in_map(void)
+{
+ asm volatile (" \
+ /* main prog */ \
+ if r1 != 0 goto l0_%=; \
+ call lookup_hash_map_in_map__1; \
+ goto l1_%=; \
+l0_%=: call lookup_hash_map_in_map__2; \
+l1_%=: r1 = r0; \
+ r2 = 0; \
+ *(u64*)(r10 - 8) = r2; \
+ r2 = r10; \
+ r2 += -8; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+ r0 = 1; \
+l2_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void lookup_hash_map_in_map__1(void)
+{
+ asm volatile (" \
+ r0 = %[map_array_48b] ll; \
+ exit; \
+" :
+ : __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void lookup_hash_map_in_map__2(void)
+{
+ asm volatile (" \
+ r0 = %[map_in_map] ll; \
+ exit; \
+" :
+ : __imm_addr(map_in_map)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("cond: two branches returning different map pointers for lookup (tail, tail)")
+__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr")
+__retval(42)
+__naked void pointers_for_lookup_tail_tail_1(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_mark]); \
+ if r6 != 0 goto l0_%=; \
+ r2 = %[map_prog2_socket] ll; \
+ goto l1_%=; \
+l0_%=: r2 = %[map_prog1_socket] ll; \
+l1_%=: r3 = 7; \
+ call %[bpf_tail_call]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket),
+ __imm_addr(map_prog2_socket),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("cond: two branches returning same map pointers for lookup (tail, tail)")
+__success __success_unpriv __retval(42)
+__naked void pointers_for_lookup_tail_tail_2(void)
+{
+ asm volatile (" \
+ r6 = *(u32*)(r1 + %[__sk_buff_mark]); \
+ if r6 == 0 goto l0_%=; \
+ r2 = %[map_prog2_socket] ll; \
+ goto l1_%=; \
+l0_%=: r2 = %[map_prog2_socket] ll; \
+l1_%=: r3 = 7; \
+ call %[bpf_tail_call]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog2_socket),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_map_ret_val.c b/tools/testing/selftests/bpf/progs/verifier_map_ret_val.c
new file mode 100644
index 000000000000..1639628b832d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_map_ret_val.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/map_ret_val.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
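+/*
+ * bpf_map_lookup_elem() returns a map_value_or_null pointer, so the
+ * result must be NULL-checked before it is dereferenced; with
+ * BPF_F_STRICT_ALIGNMENT the value accesses must also be naturally
+ * aligned, and a load instruction referencing an invalid map fd is
+ * rejected outright.
+ */
+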
+SEC("socket")
+__description("invalid map_fd for function call")
+__failure __msg("fd 0 is not pointing to valid bpf_map")
+__failure_unpriv
+__naked void map_fd_for_function_call(void)
+{
+ asm volatile (" \
+ r2 = 0; \
+ *(u64*)(r10 - 8) = r2; \
+ r2 = r10; \
+ r2 += -8; \
+ .8byte %[ld_map_fd]; \
+ .8byte 0; \
+ call %[bpf_map_delete_elem]; \
+ exit; \
+" :
+ : __imm(bpf_map_delete_elem),
+ __imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("don't check return value before access")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+__failure_unpriv
+__naked void check_return_value_before_access(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r1 = 0; \
+ *(u64*)(r0 + 0) = r1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("access memory with incorrect alignment")
+__failure __msg("misaligned value access")
+__failure_unpriv
+__flag(BPF_F_STRICT_ALIGNMENT)
+__naked void access_memory_with_incorrect_alignment_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 0; \
+ *(u64*)(r0 + 4) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("sometimes access memory with incorrect alignment")
+__failure __msg("R0 invalid mem access")
+__msg_unpriv("R0 leaks addr")
+__flag(BPF_F_STRICT_ALIGNMENT)
+__naked void access_memory_with_incorrect_alignment_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 0; \
+ *(u64*)(r0 + 0) = r1; \
+ exit; \
+l0_%=: r1 = 1; \
+ *(u64*)(r0 + 0) = r1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_masking.c b/tools/testing/selftests/bpf/progs/verifier_masking.c
new file mode 100644
index 000000000000..5732cc1b4c47
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_masking.c
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/masking.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
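+/*
+ * Every test below exercises the branch-free bounds mask (in the style
+ * of the masking used for speculative-execution hardening):
+ *
+ *	r2 = (bound - 1) - r1;
+ *	r2 |= r1;
+ *	r2 = -r2;
+ *	r2 s>>= 63;
+ *	r1 &= r2;
+ *
+ * For 0 <= r1 <= bound - 1 the mask ends up all-ones and r1 survives;
+ * for any out-of-bounds r1 the sign bit makes the mask zero and r1 is
+ * forced to 0, which is why the "out of bounds" tests expect
+ * __retval(0).
+ */
+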
+SEC("socket")
+__description("masking, test out of bounds 1")
+__success __success_unpriv __retval(0)
+__naked void test_out_of_bounds_1(void)
+{
+ asm volatile (" \
+ w1 = 5; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 5 - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test out of bounds 2")
+__success __success_unpriv __retval(0)
+__naked void test_out_of_bounds_2(void)
+{
+ asm volatile (" \
+ w1 = 1; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 1 - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test out of bounds 3")
+__success __success_unpriv __retval(0)
+__naked void test_out_of_bounds_3(void)
+{
+ asm volatile (" \
+ w1 = 0xffffffff; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 0xffffffff - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test out of bounds 4")
+__success __success_unpriv __retval(0)
+__naked void test_out_of_bounds_4(void)
+{
+ asm volatile (" \
+ w1 = 0xffffffff; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 1 - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test out of bounds 5")
+__success __success_unpriv __retval(0)
+__naked void test_out_of_bounds_5(void)
+{
+ asm volatile (" \
+ w1 = -1; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 1 - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test out of bounds 6")
+__success __success_unpriv __retval(0)
+__naked void test_out_of_bounds_6(void)
+{
+ asm volatile (" \
+ w1 = -1; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 0xffffffff - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test out of bounds 7")
+__success __success_unpriv __retval(0)
+__naked void test_out_of_bounds_7(void)
+{
+ asm volatile (" \
+ r1 = 5; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 5 - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test out of bounds 8")
+__success __success_unpriv __retval(0)
+__naked void test_out_of_bounds_8(void)
+{
+ asm volatile (" \
+ r1 = 1; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 1 - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test out of bounds 9")
+__success __success_unpriv __retval(0)
+__naked void test_out_of_bounds_9(void)
+{
+ asm volatile (" \
+ r1 = 0xffffffff; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 0xffffffff - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test out of bounds 10")
+__success __success_unpriv __retval(0)
+__naked void test_out_of_bounds_10(void)
+{
+ asm volatile (" \
+ r1 = 0xffffffff; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 1 - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test out of bounds 11")
+__success __success_unpriv __retval(0)
+__naked void test_out_of_bounds_11(void)
+{
+ asm volatile (" \
+ r1 = -1; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 1 - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test out of bounds 12")
+__success __success_unpriv __retval(0)
+__naked void test_out_of_bounds_12(void)
+{
+ asm volatile (" \
+ r1 = -1; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 0xffffffff - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test in bounds 1")
+__success __success_unpriv __retval(4)
+__naked void masking_test_in_bounds_1(void)
+{
+ asm volatile (" \
+ w1 = 4; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 5 - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test in bounds 2")
+__success __success_unpriv __retval(0)
+__naked void masking_test_in_bounds_2(void)
+{
+ asm volatile (" \
+ w1 = 0; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 0xffffffff - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test in bounds 3")
+__success __success_unpriv __retval(0xfffffffe)
+__naked void masking_test_in_bounds_3(void)
+{
+ asm volatile (" \
+ w1 = 0xfffffffe; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 0xffffffff - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test in bounds 4")
+__success __success_unpriv __retval(0xabcde)
+__naked void masking_test_in_bounds_4(void)
+{
+ asm volatile (" \
+ w1 = 0xabcde; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 0xabcdef - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test in bounds 5")
+__success __success_unpriv __retval(0)
+__naked void masking_test_in_bounds_5(void)
+{
+ asm volatile (" \
+ w1 = 0; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 1 - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test in bounds 6")
+__success __success_unpriv __retval(46)
+__naked void masking_test_in_bounds_6(void)
+{
+ asm volatile (" \
+ w1 = 46; \
+ w2 = %[__imm_0]; \
+ r2 -= r1; \
+ r2 |= r1; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r1 &= r2; \
+ r0 = r1; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 47 - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test in bounds 7")
+__success __success_unpriv __retval(46)
+__naked void masking_test_in_bounds_7(void)
+{
+ asm volatile (" \
+ r3 = -46; \
+ r3 *= -1; \
+ w2 = %[__imm_0]; \
+ r2 -= r3; \
+ r2 |= r3; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r3 &= r2; \
+ r0 = r3; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 47 - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("masking, test in bounds 8")
+__success __success_unpriv __retval(0)
+__naked void masking_test_in_bounds_8(void)
+{
+ asm volatile (" \
+ r3 = -47; \
+ r3 *= -1; \
+ w2 = %[__imm_0]; \
+ r2 -= r3; \
+ r2 |= r3; \
+ r2 = -r2; \
+ r2 s>>= 63; \
+ r3 &= r2; \
+ r0 = r3; \
+ exit; \
+" :
+ : __imm_const(__imm_0, 47 - 1)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c b/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c
new file mode 100644
index 000000000000..3966d827f288
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+
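+/*
+ * Each ".8byte %[may_goto...]" below emits a raw may_goto instruction
+ * (BPF_JMP | BPF_JCOND). Depending on the pattern the verifier either
+ * deletes the may_goto instructions outright or expands them into an
+ * inlined iteration counter kept on the stack; the __xlated() lines pin
+ * the expected post-verification program for each case.
+ */
+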
+SEC("raw_tp")
+__description("may_goto 0")
+__arch_x86_64
+__xlated("0: r0 = 1")
+__xlated("1: exit")
+__success
+__naked void may_goto_simple(void)
+{
+ asm volatile (
+ ".8byte %[may_goto];"
+ "r0 = 1;"
+ ".8byte %[may_goto];"
+ "exit;"
+ :
+ : __imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0))
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__description("batch 2 of may_goto 0")
+__arch_x86_64
+__xlated("0: r0 = 1")
+__xlated("1: exit")
+__success
+__naked void may_goto_batch_0(void)
+{
+ asm volatile (
+ ".8byte %[may_goto1];"
+ ".8byte %[may_goto1];"
+ "r0 = 1;"
+ ".8byte %[may_goto1];"
+ ".8byte %[may_goto1];"
+ "exit;"
+ :
+ : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0))
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__description("may_goto batch with offsets 2/1/0")
+__arch_x86_64
+__xlated("0: r0 = 1")
+__xlated("1: exit")
+__success
+__naked void may_goto_batch_1(void)
+{
+ asm volatile (
+ ".8byte %[may_goto1];"
+ ".8byte %[may_goto2];"
+ ".8byte %[may_goto3];"
+ "r0 = 1;"
+ ".8byte %[may_goto1];"
+ ".8byte %[may_goto2];"
+ ".8byte %[may_goto3];"
+ "exit;"
+ :
+ : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 2 /* offset */, 0)),
+ __imm_insn(may_goto2, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 1 /* offset */, 0)),
+ __imm_insn(may_goto3, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0))
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__description("may_goto batch with offsets 2/0 - x86_64")
+__arch_x86_64
+__xlated("0: *(u64 *)(r10 -16) = 65535")
+__xlated("1: *(u64 *)(r10 -8) = 0")
+__xlated("2: r11 = *(u64 *)(r10 -16)")
+__xlated("3: if r11 == 0x0 goto pc+6")
+__xlated("4: r11 -= 1")
+__xlated("5: if r11 != 0x0 goto pc+2")
+__xlated("6: r11 = -16")
+__xlated("7: call unknown")
+__xlated("8: *(u64 *)(r10 -16) = r11")
+__xlated("9: r0 = 1")
+__xlated("10: r0 = 2")
+__xlated("11: exit")
+__success
+__naked void may_goto_batch_2_x86_64(void)
+{
+ asm volatile (
+ ".8byte %[may_goto1];"
+ ".8byte %[may_goto3];"
+ "r0 = 1;"
+ "r0 = 2;"
+ "exit;"
+ :
+ : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 2 /* offset */, 0)),
+ __imm_insn(may_goto3, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0))
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__description("may_goto batch with offsets 2/0 - arm64")
+__arch_arm64
+__xlated("0: *(u64 *)(r10 -8) = 8388608")
+__xlated("1: r11 = *(u64 *)(r10 -8)")
+__xlated("2: if r11 == 0x0 goto pc+3")
+__xlated("3: r11 -= 1")
+__xlated("4: *(u64 *)(r10 -8) = r11")
+__xlated("5: r0 = 1")
+__xlated("6: r0 = 2")
+__xlated("7: exit")
+__success
+__naked void may_goto_batch_2_arm64(void)
+{
+ asm volatile (
+ ".8byte %[may_goto1];"
+ ".8byte %[may_goto3];"
+ "r0 = 1;"
+ "r0 = 2;"
+ "exit;"
+ :
+ : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 2 /* offset */, 0)),
+ __imm_insn(may_goto3, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_may_goto_2.c b/tools/testing/selftests/bpf/progs/verifier_may_goto_2.c
new file mode 100644
index 000000000000..b891faf50660
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_may_goto_2.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
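+/* can_loop (from bpf_experimental.h) expands to a may_goto based check,
+ * so these plain C loops are bounded by the verifier without explicit
+ * iteration limits.
+ */
+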
+int gvar;
+
+SEC("raw_tp")
+__description("C code with may_goto 0")
+__success
+int may_goto_c_code(void)
+{
+ int i, tmp[3];
+
+ for (i = 0; i < 3 && can_loop; i++)
+ tmp[i] = 0;
+
+ for (i = 0; i < 3 && can_loop; i++)
+ tmp[i] = gvar - i;
+
+ for (i = 0; i < 3 && can_loop; i++)
+ gvar += tmp[i];
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_meta_access.c b/tools/testing/selftests/bpf/progs/verifier_meta_access.c
new file mode 100644
index 000000000000..d81722fb5f19
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_meta_access.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/meta_access.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
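+/*
+ * XDP metadata sits in front of the packet: data_meta <= data <= data_end.
+ * Loads through the data_meta pointer must therefore be bounds-checked
+ * against data; checking against data_end instead, or skipping the check,
+ * is rejected as an invalid packet access.
+ */
+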
+SEC("xdp")
+__description("meta access, test1")
+__success __retval(0)
+__naked void meta_access_test1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("meta access, test2")
+__failure __msg("invalid access to packet, off=-8")
+__naked void meta_access_test2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r0 = r2; \
+ r0 -= 8; \
+ r4 = r2; \
+ r4 += 8; \
+ if r4 > r3 goto l0_%=; \
+ r0 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("meta access, test3")
+__failure __msg("invalid access to packet")
+__naked void meta_access_test3(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r0 = r2; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("meta access, test4")
+__failure __msg("invalid access to packet")
+__naked void meta_access_test4(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r4 = *(u32*)(r1 + %[xdp_md_data]); \
+ r0 = r4; \
+ r0 += 8; \
+ if r0 > r3 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("meta access, test5")
+__failure __msg("R3 !read_ok")
+__naked void meta_access_test5(void)
+{
+ asm volatile (" \
+ r3 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r4 = *(u32*)(r1 + %[xdp_md_data]); \
+ r0 = r3; \
+ r0 += 8; \
+ if r0 > r4 goto l0_%=; \
+ r2 = -8; \
+ call %[bpf_xdp_adjust_meta]; \
+ r0 = *(u8*)(r3 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_xdp_adjust_meta),
+ __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("meta access, test6")
+__failure __msg("invalid access to packet")
+__naked void meta_access_test6(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r0 = r3; \
+ r0 += 8; \
+ r4 = r2; \
+ r4 += 8; \
+ if r4 > r0 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("meta access, test7")
+__success __retval(0)
+__naked void meta_access_test7(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r0 = r3; \
+ r0 += 8; \
+ r4 = r2; \
+ r4 += 8; \
+ if r4 > r3 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("meta access, test8")
+__success __retval(0)
+__naked void meta_access_test8(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r4 = r2; \
+ r4 += 0xFFFF; \
+ if r4 > r3 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("meta access, test9")
+__failure __msg("invalid access to packet")
+__naked void meta_access_test9(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r4 = r2; \
+ r4 += 0xFFFF; \
+ r4 += 1; \
+ if r4 > r3 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("meta access, test10")
+__failure __msg("invalid access to packet")
+__naked void meta_access_test10(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r4 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r5 = 42; \
+ r6 = 24; \
+ *(u64*)(r10 - 8) = r5; \
+ lock *(u64 *)(r10 - 8) += r6; \
+ r5 = *(u64*)(r10 - 8); \
+ if r5 > 100 goto l0_%=; \
+ r3 += r5; \
+ r5 = r3; \
+ r6 = r2; \
+ r6 += 8; \
+ if r6 > r5 goto l0_%=; \
+ r2 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("meta access, test11")
+__success __retval(0)
+__naked void meta_access_test11(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r5 = 42; \
+ r6 = 24; \
+ *(u64*)(r10 - 8) = r5; \
+ lock *(u64 *)(r10 - 8) += r6; \
+ r5 = *(u64*)(r10 - 8); \
+ if r5 > 100 goto l0_%=; \
+ r2 += r5; \
+ r5 = r2; \
+ r6 = r2; \
+ r6 += 8; \
+ if r6 > r3 goto l0_%=; \
+ r5 = *(u8*)(r5 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("meta access, test12")
+__success __retval(0)
+__naked void meta_access_test12(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r4 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r5 = r3; \
+ r5 += 16; \
+ if r5 > r4 goto l0_%=; \
+ r0 = *(u8*)(r3 + 0); \
+ r5 = r2; \
+ r5 += 16; \
+ if r5 > r3 goto l0_%=; \
+ r0 = *(u8*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c
new file mode 100644
index 000000000000..a4d8814eb5ed
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
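+/*
+ * The (s8)/(s16)/(s32) register moves below are cpu v4 sign-extension
+ * instructions (MOVSX), hence the architecture and clang >= 18 guard;
+ * unsupported setups fall back to the dummy test at the end of the file.
+ */
+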
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+ defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
+
+SEC("socket")
+__description("MOV32SX, S8")
+__success __success_unpriv __retval(0x23)
+__naked void mov32sx_s8(void)
+{
+ asm volatile (" \
+ w0 = 0xff23; \
+ w0 = (s8)w0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S16")
+__success __success_unpriv __retval(0xFFFFff23)
+__naked void mov32sx_s16(void)
+{
+ asm volatile (" \
+ w0 = 0xff23; \
+ w0 = (s16)w0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("MOV64SX, S8")
+__success __success_unpriv __retval(-2)
+__naked void mov64sx_s8(void)
+{
+ asm volatile (" \
+ r0 = 0x1fe; \
+ r0 = (s8)r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("MOV64SX, S16")
+__success __success_unpriv __retval(0xf23)
+__naked void mov64sx_s16(void)
+{
+ asm volatile (" \
+ r0 = 0xf0f23; \
+ r0 = (s16)r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("MOV64SX, S32")
+__success __success_unpriv __retval(-1)
+__naked void mov64sx_s32(void)
+{
+ asm volatile (" \
+ r0 = 0xfffffffe; \
+ r0 = (s32)r0; \
+ r0 >>= 1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S8, range_check")
+__success __success_unpriv __retval(1)
+__naked void mov32sx_s8_range(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ w1 = (s8)w0; \
+ /* w1 with s8 range */ \
+ if w1 s> 0x7f goto l0_%=; \
+ if w1 s< -0x80 goto l0_%=; \
+ r0 = 1; \
+l1_%=: \
+ exit; \
+l0_%=: \
+ r0 = 2; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S16, range_check")
+__success __success_unpriv __retval(1)
+__naked void mov32sx_s16_range(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ w1 = (s16)w0; \
+ /* w1 with s16 range */ \
+ if w1 s> 0x7fff goto l0_%=; \
+ if w1 s< -0x80ff goto l0_%=; \
+ r0 = 1; \
+l1_%=: \
+ exit; \
+l0_%=: \
+ r0 = 2; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S16, range_check 2")
+__success __success_unpriv __retval(1)
+__naked void mov32sx_s16_range_2(void)
+{
+ asm volatile (" \
+ r1 = 65535; \
+ w2 = (s16)w1; \
+ r2 >>= 1; \
+ if r2 != 0x7fffFFFF goto l0_%=; \
+ r0 = 1; \
+l1_%=: \
+ exit; \
+l0_%=: \
+ r0 = 0; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV64SX, S8, range_check")
+__success __success_unpriv __retval(1)
+__naked void mov64sx_s8_range(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = (s8)r0; \
+ /* r1 with s8 range */ \
+ if r1 s> 0x7f goto l0_%=; \
+ if r1 s< -0x80 goto l0_%=; \
+ r0 = 1; \
+l1_%=: \
+ exit; \
+l0_%=: \
+ r0 = 2; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV64SX, S16, range_check")
+__success __success_unpriv __retval(1)
+__naked void mov64sx_s16_range(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = (s16)r0; \
+ /* r1 with s16 range */ \
+ if r1 s> 0x7fff goto l0_%=; \
+ if r1 s< -0x8000 goto l0_%=; \
+ r0 = 1; \
+l1_%=: \
+ exit; \
+l0_%=: \
+ r0 = 2; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV64SX, S32, range_check")
+__success __success_unpriv __retval(1)
+__naked void mov64sx_s32_range(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = (s32)r0; \
+ /* r1 with s32 range */ \
+ if r1 s> 0x7fffffff goto l0_%=; \
+ if r1 s< -0x80000000 goto l0_%=; \
+ r0 = 1; \
+l1_%=: \
+ exit; \
+l0_%=: \
+ r0 = 2; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV64SX, S16, R10 Sign Extension")
+__failure __msg("R1 type=scalar expected=fp, pkt, pkt_meta, map_key, map_value, mem, ringbuf_mem, buf, trusted_ptr_")
+__failure_unpriv __msg_unpriv("R10 sign-extension part of pointer")
+__naked void mov64sx_s16_r10(void)
+{
+ asm volatile (" \
+ r1 = 553656332; \
+ *(u32 *)(r10 - 8) = r1; \
+ r1 = (s16)r10; \
+ r1 += -8; \
+ r2 = 3; \
+ if r2 <= r1 goto l0_%=; \
+l0_%=: \
+ call %[bpf_trace_printk]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_trace_printk)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S8, var_off u32_max")
+__failure __msg("infinite loop detected")
+__failure_unpriv __msg_unpriv("back-edge from insn 2 to 0")
+__naked void mov64sx_s32_varoff_1(void)
+{
+ asm volatile (" \
+l0_%=: \
+ r3 = *(u8 *)(r10 -387); \
+ w7 = (s8)w3; \
+ if w7 >= 0x2533823b goto l0_%=; \
+ w0 = 0; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S8, var_off not u32_max, positive after s8 extension")
+__success __retval(0)
+__success_unpriv
+#ifdef SPEC_V1
+__xlated_unpriv("w0 = 0")
+__xlated_unpriv("exit")
+__xlated_unpriv("nospec") /* inserted to prevent `frame pointer is read only` */
+__xlated_unpriv("goto pc-1")
+#endif
+__naked void mov64sx_s32_varoff_2(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r3 = r0; \
+ r3 &= 0xf; \
+ w7 = (s8)w3; \
+ if w7 s>= 16 goto l0_%=; \
+ w0 = 0; \
+ exit; \
+l0_%=: \
+ r10 = 1; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S8, var_off not u32_max, negative after s8 extension")
+__success __retval(0)
+__success_unpriv
+#ifdef SPEC_V1
+__xlated_unpriv("w0 = 0")
+__xlated_unpriv("exit")
+__xlated_unpriv("nospec") /* inserted to prevent `frame pointer is read only` */
+__xlated_unpriv("goto pc-1")
+#endif
+__naked void mov64sx_s32_varoff_3(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r3 = r0; \
+ r3 &= 0xf; \
+ r3 |= 0x80; \
+ w7 = (s8)w3; \
+ if w7 s>= -5 goto l0_%=; \
+ w0 = 0; \
+ exit; \
+l0_%=: \
+ r10 = 1; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV64SX, S8, unsigned range_check")
+__success __retval(0)
+__naked void mov64sx_s8_range_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x1; \
+ r0 += 0xfe; \
+ r0 = (s8)r0; \
+ if r0 < 0xfffffffffffffffe goto label_%=; \
+ r0 = 0; \
+ exit; \
+label_%=: \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S8, unsigned range_check")
+__success __retval(0)
+__naked void mov32sx_s8_range_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ w0 &= 0x1; \
+ w0 += 0xfe; \
+ w0 = (s8)w0; \
+ if w0 < 0xfffffffe goto label_%=; \
+ r0 = 0; \
+ exit; \
+label_%=: \
+ exit; \
+ " :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+#else
+
+SEC("socket")
+__description("cpuv4 is not supported by compiler or jit, use a dummy test")
+__success
+int dummy_test(void)
+{
+ return 0;
+}
+
+#endif
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_mtu.c b/tools/testing/selftests/bpf/progs/verifier_mtu.c
new file mode 100644
index 000000000000..256956ea1ac5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_mtu.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("tc/ingress")
+__description("uninit/mtu: write rejected")
+__success
+__caps_unpriv(CAP_BPF|CAP_NET_ADMIN)
+__failure_unpriv __msg_unpriv("invalid read from stack")
+int tc_uninit_mtu(struct __sk_buff *ctx)
+{
+ __u32 mtu;
+
+ bpf_check_mtu(ctx, 0, &mtu, 0, 0);
+ return TCX_PASS;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c
new file mode 100644
index 000000000000..ab9f9f2620ed
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+
+#include "bpf_misc.h"
+
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
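+/*
+ * struct bpf_nf_ctx only permits full-width, read-only loads of its skb
+ * and state pointers; narrower loads, loads past the end of the context,
+ * and any writes are rejected, as the tests below expect.
+ */
+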
+SEC("netfilter")
+__description("netfilter invalid context access, size too short")
+__failure __msg("invalid bpf_context access")
+__naked void with_invalid_ctx_access_test1(void)
+{
+ asm volatile (" \
+ r2 = *(u8*)(r1 + %[__bpf_nf_ctx_state]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__bpf_nf_ctx_state, offsetof(struct bpf_nf_ctx, state))
+ : __clobber_all);
+}
+
+SEC("netfilter")
+__description("netfilter invalid context access, size too short")
+__failure __msg("invalid bpf_context access")
+__naked void with_invalid_ctx_access_test2(void)
+{
+ asm volatile (" \
+ r2 = *(u16*)(r1 + %[__bpf_nf_ctx_skb]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__bpf_nf_ctx_skb, offsetof(struct bpf_nf_ctx, skb))
+ : __clobber_all);
+}
+
+SEC("netfilter")
+__description("netfilter invalid context access, past end of ctx")
+__failure __msg("invalid bpf_context access")
+__naked void with_invalid_ctx_access_test3(void)
+{
+ asm volatile (" \
+ r2 = *(u64*)(r1 + %[__bpf_nf_ctx_size]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__bpf_nf_ctx_size, sizeof(struct bpf_nf_ctx))
+ : __clobber_all);
+}
+
+SEC("netfilter")
+__description("netfilter invalid context, write")
+__failure __msg("invalid bpf_context access")
+__naked void with_invalid_ctx_access_test4(void)
+{
+ asm volatile (" \
+ r2 = r1; \
+ *(u64*)(r2 + 0) = r1; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_const(__bpf_nf_ctx_skb, offsetof(struct bpf_nf_ctx, skb))
+ : __clobber_all);
+}
+
+#define NF_DROP 0
+#define NF_ACCEPT 1
+
+SEC("netfilter")
+__description("netfilter valid context read and invalid write")
+__failure __msg("only read is supported")
+int with_invalid_ctx_access_test5(struct bpf_nf_ctx *ctx)
+{
+ struct nf_hook_state *state = (void *)ctx->state;
+
+ state->sk = NULL;
+ return NF_ACCEPT;
+}
+
+extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
+ struct bpf_dynptr *ptr__uninit) __ksym;
+extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset,
+ void *buffer, uint32_t buffer__sz) __ksym;
+
+SEC("netfilter")
+__description("netfilter test prog with skb and state read access")
+__success __failure_unpriv
+__retval(0)
+int with_valid_ctx_access_test6(struct bpf_nf_ctx *ctx)
+{
+ struct __sk_buff *skb = (struct __sk_buff *)ctx->skb;
+ const struct nf_hook_state *state = ctx->state;
+ const struct iphdr *iph;
+ const struct tcphdr *th;
+ u8 buffer_iph[20] = {};
+ u8 buffer_th[40] = {};
+ struct bpf_dynptr ptr;
+ uint8_t ihl;
+
+ if (ctx->skb->len <= 20 || bpf_dynptr_from_skb(skb, 0, &ptr))
+ return NF_ACCEPT;
+
+ iph = bpf_dynptr_slice(&ptr, 0, buffer_iph, sizeof(buffer_iph));
+ if (!iph)
+ return NF_ACCEPT;
+
+ if (state->pf != 2)
+ return NF_ACCEPT;
+
+ ihl = iph->ihl << 2;
+
+ th = bpf_dynptr_slice(&ptr, ihl, buffer_th, sizeof(buffer_th));
+ if (!th)
+ return NF_ACCEPT;
+
+ return th->dest == bpf_htons(22) ? NF_ACCEPT : NF_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c b/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c
new file mode 100644
index 000000000000..e1ffa5d32ff0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
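+/* netfilter programs must return NF_DROP (0) or NF_ACCEPT (1); any other
+ * or unknown return value is rejected at program exit.
+ */
+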
+SEC("netfilter")
+__description("bpf_exit with invalid return code. test1")
+__failure __msg("R0 is not a known value")
+__naked void with_invalid_return_code_test1(void)
+{
+ asm volatile (" \
+ r0 = *(u64*)(r1 + 0); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("netfilter")
+__description("bpf_exit with valid return code. test2")
+__success
+__naked void with_valid_return_code_test2(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("netfilter")
+__description("bpf_exit with valid return code. test3")
+__success
+__naked void with_valid_return_code_test3(void)
+{
+ asm volatile (" \
+ r0 = 1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("netfilter")
+__description("bpf_exit with invalid return code. test4")
+__failure __msg("R0 has smin=2 smax=2 should have been in [0, 1]")
+__naked void with_invalid_return_code_test4(void)
+{
+ asm volatile (" \
+ r0 = 2; \
+ exit; \
+" ::: __clobber_all);
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_or_jmp32_k.c b/tools/testing/selftests/bpf/progs/verifier_or_jmp32_k.c
new file mode 100644
index 000000000000..f37713a265ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_or_jmp32_k.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("or_jmp32_k: bit ops + branch on unknown value")
+__failure
+__msg("R0 invalid mem access 'scalar'")
+__naked void or_jmp32_k(void)
+{
+ asm volatile (" \
+ r0 = 0xffffffff; \
+ r0 /= 1; \
+ r1 = 0; \
+ w1 = -1; \
+ w1 >>= 1; \
+ w0 &= w1; \
+ w0 |= 2; \
+ if w0 != 0x7ffffffd goto l1; \
+ r0 = 1; \
+ exit; \
+l3: \
+ r0 = 5; \
+ *(u64*)(r0 - 8) = r0; \
+ exit; \
+l2: \
+ w0 -= 0xe; \
+ if w0 == 1 goto l3; \
+ r0 = 4; \
+ exit; \
+l1: \
+ w0 -= 0x7ffffff0; \
+ if w0 s>= 0xe goto l2; \
+ r0 = 3; \
+ exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c
new file mode 100644
index 000000000000..73fee2aec698
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_precision.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 SUSE LLC */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+
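+/*
+ * These programs assert on the verifier's precision-tracking output
+ * (mark_precise backtracking), so each test pins exact instruction
+ * indexes and register states via __msg() at log level 2.
+ */
+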
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")
+__naked int bpf_neg(void)
+{
+ asm volatile (
+ "r2 = 8;"
+ "r2 = -r2;"
+ "if r2 != -8 goto 1f;"
+ "r1 = r10;"
+ "r1 += r2;"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_to_le(void)
+{
+ asm volatile (
+ "r2 = 0;"
+ "r2 = le16 r2;"
+ "if r2 != 0 goto 1f;"
+ "r1 = r10;"
+ "r1 += r2;"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (dc) r2 = be16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_to_be(void)
+{
+ asm volatile (
+ "r2 = 0;"
+ "r2 = be16 r2;"
+ "if r2 != 0 goto 1f;"
+ "r1 = r10;"
+ "r1 += r2;"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
+ __clang_major__ >= 18
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (d7) r2 = bswap16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_bswap(void)
+{
+ asm volatile (
+ "r2 = 0;"
+ "r2 = bswap16 r2;"
+ "if r2 != 0 goto 1f;"
+ "r1 = r10;"
+ "r1 += r2;"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+#ifdef CAN_USE_LOAD_ACQ_STORE_REL
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (db) r2 = load_acquire((u64 *)(r10 -8))")
+__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
+__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
+__naked int bpf_load_acquire(void)
+{
+ asm volatile (
+ "r1 = 8;"
+ "*(u64 *)(r10 - 8) = r1;"
+ ".8byte %[load_acquire_insn];" /* r2 = load_acquire((u64 *)(r10 - 8)); */
+ "r3 = r10;"
+ "r3 += r2;" /* mark_precise */
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -8))
+ : __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r2 = r10")
+__msg("mark_precise: frame0: regs=r1 stack= before 2: (79) r1 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: regs= stack=-8 before 1: (db) store_release((u64 *)(r10 -8), r1)")
+__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
+__naked int bpf_store_release(void)
+{
+ asm volatile (
+ "r1 = 8;"
+ ".8byte %[store_release_insn];" /* store_release((u64 *)(r10 - 8), r1); */
+ "r1 = *(u64 *)(r10 - 8);"
+ "r2 = r10;"
+ "r2 += r1;" /* mark_precise */
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -8))
+ : __clobber_all);
+}
+
+#endif /* CAN_USE_LOAD_ACQ_STORE_REL */
+#endif /* v4 instruction */
+
+SEC("?raw_tp")
+__success __log_level(2)
+/*
+ * Without the bug fix there will be no history between "last_idx 3 first_idx 3"
+ * and "parent state regs=" lines. "R0_w=6" parts are here to help anchor
+ * expected log messages to the one specific mark_chain_precision operation.
+ *
+ * This is quite fragile: if verifier checkpointing heuristic changes, this
+ * might need adjusting.
+ */
+__msg("2: (07) r0 += 1 ; R0_w=6")
+__msg("3: (35) if r0 >= 0xa goto pc+1")
+__msg("mark_precise: frame0: last_idx 3 first_idx 3 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r0 stack= before 2: (07) r0 += 1")
+__msg("mark_precise: frame0: regs=r0 stack= before 1: (07) r0 += 1")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (05) goto pc-4")
+__msg("mark_precise: frame0: regs=r0 stack= before 3: (35) if r0 >= 0xa goto pc+1")
+__msg("mark_precise: frame0: parent state regs= stack=: R0_rw=P4")
+__msg("3: R0_w=6")
+__naked int state_loop_first_last_equal(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "l0_%=:"
+ "r0 += 1;"
+ "r0 += 1;"
+ /* every few iterations we'll have a checkpoint here with
+ * first_idx == last_idx, potentially confusing precision
+ * backtracking logic
+ */
+ "if r0 >= 10 goto l1_%=;" /* checkpoint + mark_precise */
+ "goto l0_%=;"
+ "l1_%=:"
+ "exit;"
+ ::: __clobber_common
+ );
+}
+
+__used __naked static void __bpf_cond_op_r10(void)
+{
+ asm volatile (
+ "r2 = 2314885393468386424 ll;"
+ "goto +0;"
+ "if r2 <= r10 goto +3;"
+ "if r1 >= -1835016 goto +0;"
+ "if r2 <= 8 goto +0;"
+ "if r3 <= 0 goto +0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("8: (bd) if r2 <= r10 goto pc+3")
+__msg("9: (35) if r1 >= 0xffe3fff8 goto pc+0")
+__msg("10: (b5) if r2 <= 0x8 goto pc+0")
+__msg("mark_precise: frame1: last_idx 10 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame1: regs=r2 stack= before 9: (35) if r1 >= 0xffe3fff8 goto pc+0")
+__msg("mark_precise: frame1: regs=r2 stack= before 8: (bd) if r2 <= r10 goto pc+3")
+__msg("mark_precise: frame1: regs=r2 stack= before 7: (05) goto pc+0")
+__naked void bpf_cond_op_r10(void)
+{
+ asm volatile (
+ "r3 = 0 ll;"
+ "call __bpf_cond_op_r10;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("3: (bf) r3 = r10")
+__msg("4: (bd) if r3 <= r2 goto pc+1")
+__msg("5: (b5) if r2 <= 0x8 goto pc+2")
+__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 4: (bd) if r3 <= r2 goto pc+1")
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
+__naked void bpf_cond_op_not_r10(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "r2 = 2314885393468386424 ll;"
+ "r3 = r10;"
+ "if r3 <= r2 goto +1;"
+ "if r2 <= 8 goto +2;"
+ "r0 = 2 ll;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm.s/socket_connect")
+__success __log_level(2)
+__msg("0: (b7) r0 = 1 ; R0_w=1")
+__msg("1: (84) w0 = -w0 ; R0_w=0xffffffff")
+__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r0 stack= before 1: (84) w0 = -w0")
+__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1")
+__naked int bpf_neg_2(void)
+{
+	/*
+	 * lsm.s/socket_connect requires a return value within [-4095, 0].
+	 * Returning -1 is allowed.
+	 */
+ asm volatile (
+ "r0 = 1;"
+ "w0 = -w0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm.s/socket_connect")
+__failure __msg("At program exit the register R0 has")
+__naked int bpf_neg_3(void)
+{
+ /*
+ * lsm.s/socket_connect requires a return value within [-4095, 0].
+ * Returning -10000 is not allowed.
+ */
+ asm volatile (
+ "r0 = 10000;"
+ "w0 = -w0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm.s/socket_connect")
+__success __log_level(2)
+__msg("0: (b7) r0 = 1 ; R0_w=1")
+__msg("1: (87) r0 = -r0 ; R0_w=-1")
+__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r0 stack= before 1: (87) r0 = -r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1")
+__naked int bpf_neg_4(void)
+{
+	/*
+	 * lsm.s/socket_connect requires a return value within [-4095, 0].
+	 * Returning -1 is allowed.
+	 */
+ asm volatile (
+ "r0 = 1;"
+ "r0 = -r0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm.s/socket_connect")
+__failure __msg("At program exit the register R0 has")
+__naked int bpf_neg_5(void)
+{
+ /*
+ * lsm.s/socket_connect requires a return value within [-4095, 0].
+ * Returning -10000 is not allowed.
+ */
+ asm volatile (
+ "r0 = 10000;"
+ "r0 = -r0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c b/tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c
new file mode 100644
index 000000000000..8d27c780996f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/prevent_map_lookup.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
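+/*
+ * bpf_map_lookup_elem() is not allowed on these map types from a program:
+ * the verifier rejects lookups on stack trace (map_type 7) and prog array
+ * (map_type 3) maps, as the two tests below expect.
+ */
+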
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} map_stacktrace SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 8);
+ __uint(key_size, sizeof(int));
+ __array(values, void (void));
+} map_prog2_socket SEC(".maps");
+
+SEC("perf_event")
+__description("prevent map lookup in stack trace")
+__failure __msg("cannot pass map_type 7 into func bpf_map_lookup_elem")
+__naked void map_lookup_in_stack_trace(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_stacktrace] ll; \
+ call %[bpf_map_lookup_elem]; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_stacktrace)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("prevent map lookup in prog array")
+__failure __msg("cannot pass map_type 3 into func bpf_map_lookup_elem")
+__failure_unpriv
+__naked void map_lookup_in_prog_array(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_prog2_socket] ll; \
+ call %[bpf_map_lookup_elem]; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_prog2_socket)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_private_stack.c b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
new file mode 100644
index 000000000000..1ecd34ebde19
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+/* From include/linux/filter.h */
+#define MAX_BPF_STACK 512
+
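+/*
+ * The __jited() patterns check the machine code emitted by the JIT: with a
+ * private stack the frame is addressed through a dedicated register (%r9 on
+ * x86_64, x27/x25 on arm64) pointing at a per-CPU area, instead of carving
+ * the frame out of the normal stack with the stack pointer.
+ */
+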
+#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
+
+struct elem {
+ struct bpf_timer t;
+ char pad[256];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} array SEC(".maps");
+
+SEC("kprobe")
+__description("Private stack, single prog")
+__success
+__arch_x86_64
+__jited(" movabsq $0x{{.*}}, %r9")
+__jited(" addq %gs:{{.*}}, %r9")
+__jited(" movl $0x2a, %edi")
+__jited(" movq %rdi, -0x100(%r9)")
+__arch_arm64
+__jited(" stp x25, x27, [sp, {{.*}}]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited("...")
+__jited(" ldp x25, x27, [sp], {{.*}}")
+__naked void private_stack_single_prog(void)
+{
+ asm volatile (" \
+ r1 = 42; \
+ *(u64 *)(r10 - 256) = r1; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("raw_tp")
+__description("No private stack")
+__success
+__arch_x86_64
+__jited(" subq $0x8, %rsp")
+__arch_arm64
+__jited(" mov x25, sp")
+__jited(" sub sp, sp, #0x10")
+__naked void no_private_stack_nested(void)
+{
+ asm volatile (" \
+ r1 = 42; \
+ *(u64 *)(r10 - 8) = r1; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+__used
+__naked static void cumulative_stack_depth_subprog(void)
+{
+ asm volatile (" \
+ r1 = 41; \
+ *(u64 *)(r10 - 32) = r1; \
+ call %[bpf_get_smp_processor_id]; \
+ exit; \
+" :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("kprobe")
+__description("Private stack, subtree > MAX_BPF_STACK")
+__success
+__arch_x86_64
+/* private stack fp for the main prog */
+__jited(" movabsq $0x{{.*}}, %r9")
+__jited(" addq %gs:{{.*}}, %r9")
+__jited(" movl $0x2a, %edi")
+__jited(" movq %rdi, -0x200(%r9)")
+__jited(" pushq %r9")
+__jited(" callq 0x{{.*}}")
+__jited(" popq %r9")
+__jited(" xorl %eax, %eax")
+__arch_arm64
+__jited(" stp x25, x27, [sp, {{.*}}]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" bl {{.*}}")
+__jited("...")
+__jited(" ldp x25, x27, [sp], {{.*}}")
+__naked void private_stack_nested_1(void)
+{
+ asm volatile (" \
+ r1 = 42; \
+ *(u64 *)(r10 - %[max_bpf_stack]) = r1; \
+ call cumulative_stack_depth_subprog; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(max_bpf_stack, MAX_BPF_STACK)
+ : __clobber_all);
+}
+
+__naked __noinline __used
+static unsigned long loop_callback(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 42; \
+ *(u64 *)(r10 - 512) = r1; \
+ call cumulative_stack_depth_subprog; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_common);
+}
+
+SEC("raw_tp")
+__description("Private stack, callback")
+__success
+__arch_x86_64
+/* for func loop_callback */
+__jited("func #1")
+__jited(" endbr64")
+__jited(" nopl (%rax,%rax)")
+__jited(" nopl (%rax)")
+__jited(" pushq %rbp")
+__jited(" movq %rsp, %rbp")
+__jited(" endbr64")
+__jited(" movabsq $0x{{.*}}, %r9")
+__jited(" addq %gs:{{.*}}, %r9")
+__jited(" pushq %r9")
+__jited(" callq")
+__jited(" popq %r9")
+__jited(" movl $0x2a, %edi")
+__jited(" movq %rdi, -0x200(%r9)")
+__jited(" pushq %r9")
+__jited(" callq")
+__jited(" popq %r9")
+__arch_arm64
+__jited("func #1")
+__jited("...")
+__jited(" stp x25, x27, [sp, {{.*}}]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" mov x7, #0x0")
+__jited(" ldp x25, x27, [sp], {{.*}}")
+__naked void private_stack_callback(void)
+{
+ asm volatile (" \
+ r1 = 1; \
+ r2 = %[loop_callback]; \
+ r3 = 0; \
+ r4 = 0; \
+ call %[bpf_loop]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_ptr(loop_callback),
+ __imm(bpf_loop)
+ : __clobber_common);
+}
+
+SEC("fentry/bpf_fentry_test9")
+__description("Private stack, exception in main prog")
+__success __retval(0)
+__arch_x86_64
+__jited(" pushq %r9")
+__jited(" callq")
+__jited(" popq %r9")
+__arch_arm64
+__jited(" stp x29, x30, [sp, #-0x10]!")
+__jited(" mov x29, sp")
+__jited(" stp xzr, x26, [sp, #-0x10]!")
+__jited(" mov x26, sp")
+__jited(" stp x19, x20, [sp, #-0x10]!")
+__jited(" stp x21, x22, [sp, #-0x10]!")
+__jited(" stp x23, x24, [sp, #-0x10]!")
+__jited(" stp x25, x26, [sp, #-0x10]!")
+__jited(" stp x27, x28, [sp, #-0x10]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" mov x0, #0x0")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" ldp x27, x28, [sp], #0x10")
+int private_stack_exception_main_prog(void)
+{
+ asm volatile (" \
+ r1 = 42; \
+ *(u64 *)(r10 - 512) = r1; \
+" ::: __clobber_common);
+
+ bpf_throw(0);
+ return 0;
+}
+
+__used static int subprog_exception(void)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test9")
+__description("Private stack, exception in subprog")
+__success __retval(0)
+__arch_x86_64
+__jited(" movq %rdi, -0x200(%r9)")
+__jited(" pushq %r9")
+__jited(" callq")
+__jited(" popq %r9")
+__arch_arm64
+__jited(" stp x27, x28, [sp, #-0x10]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" ldp x27, x28, [sp], #0x10")
+int private_stack_exception_sub_prog(void)
+{
+ asm volatile (" \
+ r1 = 42; \
+ *(u64 *)(r10 - 512) = r1; \
+ call subprog_exception; \
+" ::: __clobber_common);
+
+ return 0;
+}
+
+int glob;
+__noinline static void subprog2(int *val)
+{
+ glob += val[0] * 2;
+}
+
+__noinline static void subprog1(int *val)
+{
+ int tmp[64] = {};
+
+ tmp[0] = *val;
+ subprog2(tmp);
+}
+
+__noinline static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
+{
+ subprog1(key);
+ return 0;
+}
+
+__noinline static int timer_cb2(void *map, int *key, struct bpf_timer *timer)
+{
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test9")
+__description("Private stack, async callback, not nested")
+__success __retval(0)
+__arch_x86_64
+__jited(" movabsq $0x{{.*}}, %r9")
+__arch_arm64
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+int private_stack_async_callback_1(void)
+{
+ struct bpf_timer *arr_timer;
+ int array_key = 0;
+
+ arr_timer = bpf_map_lookup_elem(&array, &array_key);
+ if (!arr_timer)
+ return 0;
+
+ bpf_timer_init(arr_timer, &array, 1);
+ bpf_timer_set_callback(arr_timer, timer_cb2);
+ bpf_timer_start(arr_timer, 0, 0);
+ subprog1(&array_key);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test9")
+__description("Private stack, async callback, potential nesting")
+__success __retval(0)
+__arch_x86_64
+__jited(" subq $0x100, %rsp")
+__arch_arm64
+__jited(" sub sp, sp, #0x100")
+int private_stack_async_callback_2(void)
+{
+ struct bpf_timer *arr_timer;
+ int array_key = 0;
+
+ arr_timer = bpf_map_lookup_elem(&array, &array_key);
+ if (!arr_timer)
+ return 0;
+
+ bpf_timer_init(arr_timer, &array, 1);
+ bpf_timer_set_callback(arr_timer, timer_cb1);
+ bpf_timer_start(arr_timer, 0, 0);
+ subprog1(&array_key);
+ return 0;
+}
+
+#else
+
+SEC("kprobe")
+__description("private stack is not supported, use a dummy test")
+__success
+int dummy_test(void)
+{
+ return 0;
+}
+
+#endif
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_raw_stack.c b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c
new file mode 100644
index 000000000000..c689665e07b9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/raw_stack.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
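+/*
+ * These tests exercise helper writes into raw (uninitialized) stack memory:
+ * R3 is the destination stack pointer and R4 the length passed to
+ * bpf_skb_load_bytes(), so the failure cases cover the verifier's bounds
+ * and zero-size checks for both arguments.
+ */
+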
+SEC("socket")
+__description("raw_stack: no skb_load_bytes")
+__success
+__failure_unpriv __msg_unpriv("invalid read from stack R6 off=-8 size=8")
+__naked void stack_no_skb_load_bytes(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -8; \
+ r3 = r6; \
+ r4 = 8; \
+ /* Call to skb_load_bytes() omitted. */ \
+ r0 = *(u64*)(r6 + 0); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, negative len")
+__failure __msg("R4 min value is negative")
+__naked void skb_load_bytes_negative_len(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -8; \
+ r3 = r6; \
+ r4 = -8; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, negative len 2")
+__failure __msg("R4 min value is negative")
+__naked void load_bytes_negative_len_2(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -8; \
+ r3 = r6; \
+ r4 = %[__imm_0]; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes),
+ __imm_const(__imm_0, ~0)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, zero len")
+__failure __msg("R4 invalid zero-sized read: u64=[0,0]")
+__naked void skb_load_bytes_zero_len(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -8; \
+ r3 = r6; \
+ r4 = 0; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, no init")
+__success __retval(0)
+__naked void skb_load_bytes_no_init(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -8; \
+ r3 = r6; \
+ r4 = 8; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, init")
+__success __retval(0)
+__naked void stack_skb_load_bytes_init(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -8; \
+ r3 = 0xcafe; \
+ *(u64*)(r6 + 0) = r3; \
+ r3 = r6; \
+ r4 = 8; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, spilled regs around bounds")
+__success __retval(0)
+__naked void bytes_spilled_regs_around_bounds(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -16; \
+ *(u64*)(r6 - 8) = r1; \
+ *(u64*)(r6 + 8) = r1; \
+ r3 = r6; \
+ r4 = 8; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 - 8); \
+ r2 = *(u64*)(r6 + 8); \
+ r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
+ r2 = *(u32*)(r2 + %[__sk_buff_priority]); \
+ r0 += r2; \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
+ __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, spilled regs corruption")
+__failure __msg("R0 invalid mem access 'scalar'")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void load_bytes_spilled_regs_corruption(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -8; \
+ *(u64*)(r6 + 0) = r1; \
+ r3 = r6; \
+ r4 = 8; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 + 0); \
+ r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, spilled regs corruption 2")
+__failure __msg("R3 invalid mem access 'scalar'")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void bytes_spilled_regs_corruption_2(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -16; \
+ *(u64*)(r6 - 8) = r1; \
+ *(u64*)(r6 + 0) = r1; \
+ *(u64*)(r6 + 8) = r1; \
+ r3 = r6; \
+ r4 = 8; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 - 8); \
+ r2 = *(u64*)(r6 + 8); \
+ r3 = *(u64*)(r6 + 0); \
+ r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
+ r2 = *(u32*)(r2 + %[__sk_buff_priority]); \
+ r0 += r2; \
+ r3 = *(u32*)(r3 + %[__sk_buff_pkt_type]); \
+ r0 += r3; \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
+ __imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)),
+ __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, spilled regs + data")
+__success __retval(0)
+__naked void load_bytes_spilled_regs_data(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -16; \
+ *(u64*)(r6 - 8) = r1; \
+ *(u64*)(r6 + 0) = r1; \
+ *(u64*)(r6 + 8) = r1; \
+ r3 = r6; \
+ r4 = 8; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 - 8); \
+ r2 = *(u64*)(r6 + 8); \
+ r3 = *(u64*)(r6 + 0); \
+ r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
+ r2 = *(u32*)(r2 + %[__sk_buff_priority]); \
+ r0 += r2; \
+ r0 += r3; \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
+ __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, invalid access 1")
+__failure __msg("invalid write to stack R3 off=-513 size=8")
+__naked void load_bytes_invalid_access_1(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -513; \
+ r3 = r6; \
+ r4 = 8; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, invalid access 2")
+__failure __msg("invalid write to stack R3 off=-1 size=8")
+__naked void load_bytes_invalid_access_2(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -1; \
+ r3 = r6; \
+ r4 = 8; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, invalid access 3")
+__failure __msg("R4 min value is negative")
+__naked void load_bytes_invalid_access_3(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += 0xffffffff; \
+ r3 = r6; \
+ r4 = 0xffffffff; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, invalid access 4")
+__failure
+__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
+__naked void load_bytes_invalid_access_4(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -1; \
+ r3 = r6; \
+ r4 = 0x7fffffff; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, invalid access 5")
+__failure
+__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
+__naked void load_bytes_invalid_access_5(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -512; \
+ r3 = r6; \
+ r4 = 0x7fffffff; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, invalid access 6")
+__failure __msg("invalid zero-sized read")
+__naked void load_bytes_invalid_access_6(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -512; \
+ r3 = r6; \
+ r4 = 0; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("raw_stack: skb_load_bytes, large access")
+__success __retval(0)
+__naked void skb_load_bytes_large_access(void)
+{
+ asm volatile (" \
+ r2 = 4; \
+ r6 = r10; \
+ r6 += -512; \
+ r3 = r6; \
+ r4 = 512; \
+ call %[bpf_skb_load_bytes]; \
+ r0 = *(u64*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_skb_load_bytes)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
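
The raw_stack tests above hand bpf_skb_load_bytes() a stack buffer straight from naked asm: r2 is the packet offset, r3 the destination on the stack, r4 the length. A minimal sketch of the same pattern in ordinary C (hypothetical program, assuming the usual <linux/bpf.h> and <bpf/bpf_helpers.h> includes; not part of this patch):

SEC("tc")
int raw_stack_demo(struct __sk_buff *skb)
{
	__u8 buf[8];

	/* read 8 bytes starting at packet offset 4 into the stack buffer */
	if (bpf_skb_load_bytes(skb, 4, buf, sizeof(buf)) < 0)
		return 0;
	return buf[0];
}

The helper returns 0 on success and a negative error otherwise; the invalid-access tests above are rejected before the call is ever reached because the stack pointer or the length argument is out of range.
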
diff --git a/tools/testing/selftests/bpf/progs/verifier_raw_tp_writable.c b/tools/testing/selftests/bpf/progs/verifier_raw_tp_writable.c
new file mode 100644
index 000000000000..14a0172e2141
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_raw_tp_writable.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/raw_tp_writable.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
+SEC("raw_tracepoint.w")
+__description("raw_tracepoint_writable: reject variable offset")
+__failure
+__msg("R6 invalid variable buffer offset: off=0, var_off=(0x0; 0xffffffff)")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void tracepoint_writable_reject_variable_offset(void)
+{
+ asm volatile (" \
+ /* r6 is our tp buffer */ \
+ r6 = *(u64*)(r1 + 0); \
+ r1 = %[map_hash_8b] ll; \
+ /* move the key (== 0) to r10-8 */ \
+ w0 = 0; \
+ r2 = r10; \
+ r2 += -8; \
+ *(u64*)(r2 + 0) = r0; \
+ /* lookup in the map */ \
+ call %[bpf_map_lookup_elem]; \
+ /* exit clean if null */ \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: /* shift the buffer pointer to a variable location */\
+ r0 = *(u32*)(r0 + 0); \
+ r6 += r0; \
+ /* clobber whatever's there */ \
+ r7 = 4242; \
+ *(u64*)(r6 + 0) = r7; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
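
The rejected program above moves the writable tracepoint buffer by a map-derived scalar before storing through it. For contrast, a constant-offset write through args[0] is the accepted shape; a minimal sketch in plain C (the hook name is hypothetical and not part of this patch):

SEC("raw_tracepoint.w/example_writable_hook")
int raw_tp_writable_demo(struct bpf_raw_tracepoint_args *ctx)
{
	/* args[0] is the writable tracepoint buffer */
	__u64 *buf = (void *)ctx->args[0];

	*buf = 4242;	/* fixed offset 0, inside the buffer, so allowed */
	return 0;
}
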
diff --git a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
new file mode 100644
index 000000000000..910365201f68
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
@@ -0,0 +1,1495 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/ref_tracking.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+
+#define BPF_SK_LOOKUP(func) \
+ /* struct bpf_sock_tuple tuple = {} */ \
+ "r2 = 0;" \
+ "*(u32*)(r10 - 8) = r2;" \
+ "*(u64*)(r10 - 16) = r2;" \
+ "*(u64*)(r10 - 24) = r2;" \
+ "*(u64*)(r10 - 32) = r2;" \
+ "*(u64*)(r10 - 40) = r2;" \
+ "*(u64*)(r10 - 48) = r2;" \
+ /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
+ "r2 = r10;" \
+ "r2 += -48;" \
+ "r3 = %[sizeof_bpf_sock_tuple];"\
+ "r4 = 0;" \
+ "r5 = 0;" \
+ "call %[" #func "];"
+
+struct bpf_key {} __attribute__((preserve_access_index));
+
+extern void bpf_key_put(struct bpf_key *key) __ksym;
+extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
+extern struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym;
+
+/* BTF FUNC records are not generated for kfuncs referenced
+ * from inline assembly. These records are necessary for
+ * libbpf to link the program. The function below is a hack
+ * to ensure that BTF FUNC records are generated.
+ */
+void __kfunc_btf_root(void)
+{
+ bpf_key_put(0);
+ bpf_lookup_system_key(0);
+ bpf_lookup_user_key(0, 0);
+}
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct test_val);
+} map_array_48b SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096);
+} map_ringbuf SEC(".maps");
+
+void dummy_prog_42_tc(void);
+void dummy_prog_24_tc(void);
+void dummy_prog_loop1_tc(void);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 4);
+ __uint(key_size, sizeof(int));
+ __array(values, void (void));
+} map_prog1_tc SEC(".maps") = {
+ .values = {
+ [0] = (void *)&dummy_prog_42_tc,
+ [1] = (void *)&dummy_prog_loop1_tc,
+ [2] = (void *)&dummy_prog_24_tc,
+ },
+};
+
+SEC("tc")
+__auxiliary
+__naked void dummy_prog_42_tc(void)
+{
+ asm volatile ("r0 = 42; exit;");
+}
+
+SEC("tc")
+__auxiliary
+__naked void dummy_prog_24_tc(void)
+{
+ asm volatile ("r0 = 24; exit;");
+}
+
+SEC("tc")
+__auxiliary
+__naked void dummy_prog_loop1_tc(void)
+{
+ asm volatile (" \
+ r3 = 1; \
+ r2 = %[map_prog1_tc] ll; \
+ call %[bpf_tail_call]; \
+ r0 = 41; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_tc)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: leak potential reference")
+__failure __msg("Unreleased reference")
+__naked void reference_tracking_leak_potential_reference(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r6 = r0; /* leak reference */ \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: leak potential reference to sock_common")
+__failure __msg("Unreleased reference")
+__naked void potential_reference_to_sock_common_1(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
+" r6 = r0; /* leak reference */ \
+ exit; \
+" :
+ : __imm(bpf_skc_lookup_tcp),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: leak potential reference on stack")
+__failure __msg("Unreleased reference")
+__naked void leak_potential_reference_on_stack(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r4 = r10; \
+ r4 += -8; \
+ *(u64*)(r4 + 0) = r0; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: leak potential reference on stack 2")
+__failure __msg("Unreleased reference")
+__naked void potential_reference_on_stack_2(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r4 = r10; \
+ r4 += -8; \
+ *(u64*)(r4 + 0) = r0; \
+ r0 = 0; \
+ r1 = 0; \
+ *(u64*)(r4 + 0) = r1; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: zero potential reference")
+__failure __msg("Unreleased reference")
+__naked void reference_tracking_zero_potential_reference(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r0 = 0; /* leak reference */ \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: zero potential reference to sock_common")
+__failure __msg("Unreleased reference")
+__naked void potential_reference_to_sock_common_2(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
+" r0 = 0; /* leak reference */ \
+ exit; \
+" :
+ : __imm(bpf_skc_lookup_tcp),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: copy and zero potential references")
+__failure __msg("Unreleased reference")
+__naked void copy_and_zero_potential_references(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r7 = r0; \
+ r0 = 0; \
+ r7 = 0; /* leak reference */ \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("lsm.s/bpf")
+__description("reference tracking: acquire/release user key reference")
+__success
+__naked void acquire_release_user_key_reference(void)
+{
+ asm volatile (" \
+ r1 = -3; \
+ r2 = 0; \
+ call %[bpf_lookup_user_key]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ call %[bpf_key_put]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_key_put),
+ __imm(bpf_lookup_user_key)
+ : __clobber_all);
+}
+
+SEC("lsm.s/bpf")
+__description("reference tracking: acquire/release system key reference")
+__success
+__naked void acquire_release_system_key_reference(void)
+{
+ asm volatile (" \
+ r1 = 1; \
+ call %[bpf_lookup_system_key]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r0; \
+ call %[bpf_key_put]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_key_put),
+ __imm(bpf_lookup_system_key)
+ : __clobber_all);
+}
+
+SEC("lsm.s/bpf")
+__description("reference tracking: release user key reference without check")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__naked void user_key_reference_without_check(void)
+{
+ asm volatile (" \
+ r1 = -3; \
+ r2 = 0; \
+ call %[bpf_lookup_user_key]; \
+ r1 = r0; \
+ call %[bpf_key_put]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_key_put),
+ __imm(bpf_lookup_user_key)
+ : __clobber_all);
+}
+
+SEC("lsm.s/bpf")
+__description("reference tracking: release system key reference without check")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__naked void system_key_reference_without_check(void)
+{
+ asm volatile (" \
+ r1 = 1; \
+ call %[bpf_lookup_system_key]; \
+ r1 = r0; \
+ call %[bpf_key_put]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_key_put),
+ __imm(bpf_lookup_system_key)
+ : __clobber_all);
+}
+
+SEC("lsm.s/bpf")
+__description("reference tracking: release with NULL key pointer")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__naked void release_with_null_key_pointer(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ call %[bpf_key_put]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_key_put)
+ : __clobber_all);
+}
+
+SEC("lsm.s/bpf")
+__description("reference tracking: leak potential reference to user key")
+__failure __msg("Unreleased reference")
+__naked void potential_reference_to_user_key(void)
+{
+ asm volatile (" \
+ r1 = -3; \
+ r2 = 0; \
+ call %[bpf_lookup_user_key]; \
+ exit; \
+" :
+ : __imm(bpf_lookup_user_key)
+ : __clobber_all);
+}
+
+SEC("lsm.s/bpf")
+__description("reference tracking: leak potential reference to system key")
+__failure __msg("Unreleased reference")
+__naked void potential_reference_to_system_key(void)
+{
+ asm volatile (" \
+ r1 = 1; \
+ call %[bpf_lookup_system_key]; \
+ exit; \
+" :
+ : __imm(bpf_lookup_system_key)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: release reference without check")
+__failure __msg("type=sock_or_null expected=sock")
+__naked void tracking_release_reference_without_check(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" /* reference in r0 may be NULL */ \
+ r1 = r0; \
+ r2 = 0; \
+ call %[bpf_sk_release]; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: release reference to sock_common without check")
+__failure __msg("type=sock_common_or_null expected=sock")
+__naked void to_sock_common_without_check(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
+" /* reference in r0 may be NULL */ \
+ r1 = r0; \
+ r2 = 0; \
+ call %[bpf_sk_release]; \
+ exit; \
+" :
+ : __imm(bpf_sk_release),
+ __imm(bpf_skc_lookup_tcp),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: release reference")
+__success __retval(0)
+__naked void reference_tracking_release_reference(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r1 = r0; \
+ if r0 == 0 goto l0_%=; \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: release reference to sock_common")
+__success __retval(0)
+__naked void release_reference_to_sock_common(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
+" r1 = r0; \
+ if r0 == 0 goto l0_%=; \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_release),
+ __imm(bpf_skc_lookup_tcp),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: release reference 2")
+__success __retval(0)
+__naked void reference_tracking_release_reference_2(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r1 = r0; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: call %[bpf_sk_release]; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: release reference twice")
+__failure __msg("type=scalar expected=sock")
+__naked void reference_tracking_release_reference_twice(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r1 = r0; \
+ r6 = r0; \
+ if r0 == 0 goto l0_%=; \
+ call %[bpf_sk_release]; \
+l0_%=: r1 = r6; \
+ call %[bpf_sk_release]; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: release reference twice inside branch")
+__failure __msg("type=scalar expected=sock")
+__naked void release_reference_twice_inside_branch(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r1 = r0; \
+ r6 = r0; \
+ if r0 == 0 goto l0_%=; /* goto end */ \
+ call %[bpf_sk_release]; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: alloc, check, free in one subbranch")
+__failure __msg("Unreleased reference")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void check_free_in_one_subbranch(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 16; \
+ /* if (offsetof(skb, mark) > data_len) exit; */ \
+ if r0 <= r3 goto l0_%=; \
+ exit; \
+l0_%=: r6 = *(u32*)(r2 + %[__sk_buff_mark]); \
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" if r6 == 0 goto l1_%=; /* mark == 0? */\
+ /* Leak reference in R0 */ \
+ exit; \
+l1_%=: if r0 == 0 goto l2_%=; /* sk NULL? */ \
+ r1 = r0; \
+ call %[bpf_sk_release]; \
+l2_%=: exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: alloc, check, free in both subbranches")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void check_free_in_both_subbranches(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 16; \
+ /* if (offsetof(skb, mark) > data_len) exit; */ \
+ if r0 <= r3 goto l0_%=; \
+ exit; \
+l0_%=: r6 = *(u32*)(r2 + %[__sk_buff_mark]); \
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" if r6 == 0 goto l1_%=; /* mark == 0? */\
+ if r0 == 0 goto l2_%=; /* sk NULL? */ \
+ r1 = r0; \
+ call %[bpf_sk_release]; \
+l2_%=: exit; \
+l1_%=: if r0 == 0 goto l3_%=; /* sk NULL? */ \
+ r1 = r0; \
+ call %[bpf_sk_release]; \
+l3_%=: exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking in call: free reference in subprog")
+__success __retval(0)
+__naked void call_free_reference_in_subprog(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r1 = r0; /* unchecked reference */ \
+ call call_free_reference_in_subprog__1; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void call_free_reference_in_subprog__1(void)
+{
+ asm volatile (" \
+ /* subprog 1 */ \
+ r2 = r1; \
+ if r2 == 0 goto l0_%=; \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_release)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking in call: free reference in subprog and outside")
+__failure __msg("type=scalar expected=sock")
+__naked void reference_in_subprog_and_outside(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r1 = r0; /* unchecked reference */ \
+ r6 = r0; \
+ call reference_in_subprog_and_outside__1; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void reference_in_subprog_and_outside__1(void)
+{
+ asm volatile (" \
+ /* subprog 1 */ \
+ r2 = r1; \
+ if r2 == 0 goto l0_%=; \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_release)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking in call: alloc & leak reference in subprog")
+__failure __msg("Unreleased reference")
+__naked void alloc_leak_reference_in_subprog(void)
+{
+ asm volatile (" \
+ r4 = r10; \
+ r4 += -8; \
+ call alloc_leak_reference_in_subprog__1; \
+ r1 = r0; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void alloc_leak_reference_in_subprog__1(void)
+{
+ asm volatile (" \
+ /* subprog 1 */ \
+ r6 = r4; \
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" /* spill unchecked sk_ptr into stack of caller */\
+ *(u64*)(r6 + 0) = r0; \
+ r1 = r0; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking in call: alloc in subprog, release outside")
+__success __retval(POINTER_VALUE)
+__naked void alloc_in_subprog_release_outside(void)
+{
+ asm volatile (" \
+ r4 = r10; \
+ call alloc_in_subprog_release_outside__1; \
+ r1 = r0; \
+ if r0 == 0 goto l0_%=; \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_release)
+ : __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void alloc_in_subprog_release_outside__1(void)
+{
+ asm volatile (" \
+ /* subprog 1 */ \
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" exit; /* return sk */ \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking in call: sk_ptr leak into caller stack")
+__failure __msg("Unreleased reference")
+__naked void ptr_leak_into_caller_stack(void)
+{
+ asm volatile (" \
+ r4 = r10; \
+ r4 += -8; \
+ call ptr_leak_into_caller_stack__1; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void ptr_leak_into_caller_stack__1(void)
+{
+ asm volatile (" \
+ /* subprog 1 */ \
+ r5 = r10; \
+ r5 += -8; \
+ *(u64*)(r5 + 0) = r4; \
+ call ptr_leak_into_caller_stack__2; \
+ /* spill unchecked sk_ptr into stack of caller */\
+ r5 = r10; \
+ r5 += -8; \
+ r4 = *(u64*)(r5 + 0); \
+ *(u64*)(r4 + 0) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void ptr_leak_into_caller_stack__2(void)
+{
+ asm volatile (" \
+ /* subprog 2 */ \
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking in call: sk_ptr spill into caller stack")
+__success __retval(0)
+__naked void ptr_spill_into_caller_stack(void)
+{
+ asm volatile (" \
+ r4 = r10; \
+ r4 += -8; \
+ call ptr_spill_into_caller_stack__1; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void ptr_spill_into_caller_stack__1(void)
+{
+ asm volatile (" \
+ /* subprog 1 */ \
+ r5 = r10; \
+ r5 += -8; \
+ *(u64*)(r5 + 0) = r4; \
+ call ptr_spill_into_caller_stack__2; \
+ /* spill unchecked sk_ptr into stack of caller */\
+ r5 = r10; \
+ r5 += -8; \
+ r4 = *(u64*)(r5 + 0); \
+ *(u64*)(r4 + 0) = r0; \
+ if r0 == 0 goto l0_%=; \
+ /* now the sk_ptr is verified, free the reference */\
+ r1 = *(u64*)(r4 + 0); \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_release)
+ : __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void ptr_spill_into_caller_stack__2(void)
+{
+ asm volatile (" \
+ /* subprog 2 */ \
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: allow LD_ABS")
+__success __retval(0)
+__naked void reference_tracking_allow_ld_abs(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r1 = r0; \
+ if r0 == 0 goto l0_%=; \
+ call %[bpf_sk_release]; \
+l0_%=: r0 = *(u8*)skb[0]; \
+ r0 = *(u16*)skb[0]; \
+ r0 = *(u32*)skb[0]; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: forbid LD_ABS while holding reference")
+__failure __msg("BPF_LD_[ABS|IND] would lead to reference leak")
+__naked void ld_abs_while_holding_reference(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r0 = *(u8*)skb[0]; \
+ r0 = *(u16*)skb[0]; \
+ r0 = *(u32*)skb[0]; \
+ r1 = r0; \
+ if r0 == 0 goto l0_%=; \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: allow LD_IND")
+__success __retval(1)
+__naked void reference_tracking_allow_ld_ind(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r1 = r0; \
+ if r0 == 0 goto l0_%=; \
+ call %[bpf_sk_release]; \
+l0_%=: r7 = 1; \
+ .8byte %[ld_ind]; \
+ r0 = r7; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)),
+ __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: forbid LD_IND while holding reference")
+__failure __msg("BPF_LD_[ABS|IND] would lead to reference leak")
+__naked void ld_ind_while_holding_reference(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r4 = r0; \
+ r7 = 1; \
+ .8byte %[ld_ind]; \
+ r0 = r7; \
+ r1 = r4; \
+ if r1 == 0 goto l0_%=; \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)),
+ __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: check reference or tail call")
+__success __retval(0)
+__naked void check_reference_or_tail_call(void)
+{
+ asm volatile (" \
+ r7 = r1; \
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" /* if (sk) bpf_sk_release() */ \
+ r1 = r0; \
+ if r1 != 0 goto l0_%=; \
+ /* bpf_tail_call() */ \
+ r3 = 3; \
+ r2 = %[map_prog1_tc] ll; \
+ r1 = r7; \
+ call %[bpf_tail_call]; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_release]; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm(bpf_tail_call),
+ __imm_addr(map_prog1_tc),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: release reference then tail call")
+__success __retval(0)
+__naked void release_reference_then_tail_call(void)
+{
+ asm volatile (" \
+ r7 = r1; \
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" /* if (sk) bpf_sk_release() */ \
+ r1 = r0; \
+ if r1 == 0 goto l0_%=; \
+ call %[bpf_sk_release]; \
+l0_%=: /* bpf_tail_call() */ \
+ r3 = 3; \
+ r2 = %[map_prog1_tc] ll; \
+ r1 = r7; \
+ call %[bpf_tail_call]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm(bpf_tail_call),
+ __imm_addr(map_prog1_tc),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: leak possible reference over tail call")
+__failure __msg("tail_call would lead to reference leak")
+__naked void possible_reference_over_tail_call(void)
+{
+ asm volatile (" \
+ r7 = r1; \
+ /* Look up socket and store in REG_6 */ \
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" /* bpf_tail_call() */ \
+ r6 = r0; \
+ r3 = 3; \
+ r2 = %[map_prog1_tc] ll; \
+ r1 = r7; \
+ call %[bpf_tail_call]; \
+ r0 = 0; \
+ /* if (sk) bpf_sk_release() */ \
+ r1 = r6; \
+ if r1 == 0 goto l0_%=; \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm(bpf_tail_call),
+ __imm_addr(map_prog1_tc),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: leak checked reference over tail call")
+__failure __msg("tail_call would lead to reference leak")
+__naked void checked_reference_over_tail_call(void)
+{
+ asm volatile (" \
+ r7 = r1; \
+ /* Look up socket and store in REG_6 */ \
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r6 = r0; \
+ /* if (!sk) goto end */ \
+ if r0 == 0 goto l0_%=; \
+ /* bpf_tail_call() */ \
+ r3 = 0; \
+ r2 = %[map_prog1_tc] ll; \
+ r1 = r7; \
+ call %[bpf_tail_call]; \
+ r0 = 0; \
+ r1 = r6; \
+l0_%=: call %[bpf_sk_release]; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm(bpf_tail_call),
+ __imm_addr(map_prog1_tc),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: mangle and release sock_or_null")
+__failure __msg("R1 pointer arithmetic on sock_or_null prohibited")
+__naked void and_release_sock_or_null(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r1 = r0; \
+ r1 += 5; \
+ if r0 == 0 goto l0_%=; \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: mangle and release sock")
+__failure __msg("R1 pointer arithmetic on sock prohibited")
+__naked void tracking_mangle_and_release_sock(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r1 = r0; \
+ if r0 == 0 goto l0_%=; \
+ r1 += 5; \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: access member")
+__success __retval(0)
+__naked void reference_tracking_access_member(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r6 = r0; \
+ if r0 == 0 goto l0_%=; \
+ r2 = *(u32*)(r0 + 4); \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: write to member")
+__failure __msg("cannot write into sock")
+__naked void reference_tracking_write_to_member(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r6 = r0; \
+ if r0 == 0 goto l0_%=; \
+ r1 = r6; \
+ r2 = 42 ll; \
+ *(u32*)(r1 + %[bpf_sock_mark]) = r2; \
+ r1 = r6; \
+l0_%=: call %[bpf_sk_release]; \
+ r0 = 0 ll; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: invalid 64-bit access of member")
+__failure __msg("invalid sock access off=0 size=8")
+__naked void _64_bit_access_of_member(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r6 = r0; \
+ if r0 == 0 goto l0_%=; \
+ r2 = *(u64*)(r0 + 0); \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: access after release")
+__failure __msg("!read_ok")
+__naked void reference_tracking_access_after_release(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r1 = r0; \
+ if r0 == 0 goto l0_%=; \
+ call %[bpf_sk_release]; \
+ r2 = *(u32*)(r1 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: direct access for lookup")
+__success __retval(0)
+__naked void tracking_direct_access_for_lookup(void)
+{
+ asm volatile (" \
+ /* Check that the packet is at least 64B long */\
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r0 = r2; \
+ r0 += 64; \
+ if r0 > r3 goto l0_%=; \
+ /* sk = sk_lookup_tcp(ctx, skb->data, ...) */ \
+ r3 = %[sizeof_bpf_sock_tuple]; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_sk_lookup_tcp]; \
+ r6 = r0; \
+ if r0 == 0 goto l0_%=; \
+ r2 = *(u32*)(r0 + 4); \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: use ptr from bpf_tcp_sock() after release")
+__failure __msg("invalid mem access")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void bpf_tcp_sock_after_release(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ call %[bpf_tcp_sock]; \
+ if r0 != 0 goto l1_%=; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+ exit; \
+l1_%=: r7 = r0; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+ r0 = *(u32*)(r7 + %[bpf_tcp_sock_snd_cwnd]); \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm(bpf_tcp_sock),
+ __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: use ptr from bpf_sk_fullsock() after release")
+__failure __msg("invalid mem access")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void bpf_sk_fullsock_after_release(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+ exit; \
+l1_%=: r7 = r0; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+ r0 = *(u32*)(r7 + %[bpf_sock_type]); \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: use ptr from bpf_sk_fullsock(tp) after release")
+__failure __msg("invalid mem access")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void sk_fullsock_tp_after_release(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ call %[bpf_tcp_sock]; \
+ if r0 != 0 goto l1_%=; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+ exit; \
+l1_%=: r1 = r0; \
+ call %[bpf_sk_fullsock]; \
+ r1 = r6; \
+ r6 = r0; \
+ call %[bpf_sk_release]; \
+ if r6 != 0 goto l2_%=; \
+ exit; \
+l2_%=: r0 = *(u32*)(r6 + %[bpf_sock_type]); \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm(bpf_tcp_sock),
+ __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: use sk after bpf_sk_release(tp)")
+__failure __msg("invalid mem access")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void after_bpf_sk_release_tp(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ call %[bpf_tcp_sock]; \
+ if r0 != 0 goto l1_%=; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+ exit; \
+l1_%=: r1 = r0; \
+ call %[bpf_sk_release]; \
+ r0 = *(u32*)(r6 + %[bpf_sock_type]); \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm(bpf_tcp_sock),
+ __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)")
+__success __retval(0)
+__naked void after_bpf_sk_release_sk(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ call %[bpf_get_listener_sock]; \
+ if r0 != 0 goto l1_%=; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+ exit; \
+l1_%=: r1 = r6; \
+ r6 = r0; \
+ call %[bpf_sk_release]; \
+ r0 = *(u32*)(r6 + %[bpf_sock_src_port]); \
+ exit; \
+" :
+ : __imm(bpf_get_listener_sock),
+ __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port)),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: bpf_sk_release(listen_sk)")
+__failure __msg("R1 must be referenced when passed to release function")
+__naked void bpf_sk_release_listen_sk(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ call %[bpf_get_listener_sock]; \
+ if r0 != 0 goto l1_%=; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+ exit; \
+l1_%=: r1 = r0; \
+ call %[bpf_sk_release]; \
+ r0 = *(u32*)(r6 + %[bpf_sock_type]); \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+ exit; \
+" :
+ : __imm(bpf_get_listener_sock),
+ __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+/* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
+SEC("tc")
+__description("reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)")
+__failure __msg("invalid mem access")
+__naked void and_bpf_tcp_sock_sk(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ call %[bpf_sk_fullsock]; \
+ r7 = r0; \
+ r1 = r6; \
+ call %[bpf_tcp_sock]; \
+ r8 = r0; \
+ if r7 != 0 goto l1_%=; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+ exit; \
+l1_%=: r0 = *(u32*)(r8 + %[bpf_tcp_sock_snd_cwnd]); \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm(bpf_tcp_sock),
+ __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: branch tracking valid pointer null comparison")
+__success __retval(0)
+__naked void tracking_valid_pointer_null_comparison(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r6 = r0; \
+ r3 = 1; \
+ if r6 != 0 goto l0_%=; \
+ r3 = 0; \
+l0_%=: if r6 == 0 goto l1_%=; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+l1_%=: exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: branch tracking valid pointer value comparison")
+__failure __msg("Unreleased reference")
+__naked void tracking_valid_pointer_value_comparison(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r6 = r0; \
+ r3 = 1; \
+ if r6 == 0 goto l0_%=; \
+ r3 = 0; \
+ if r6 == 1234 goto l0_%=; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: bpf_sk_release(btf_tcp_sock)")
+__success
+__retval(0)
+__naked void sk_release_btf_tcp_sock(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ call %[bpf_skc_to_tcp_sock]; \
+ if r0 != 0 goto l1_%=; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+ exit; \
+l1_%=: r1 = r0; \
+ call %[bpf_sk_release]; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm(bpf_skc_to_tcp_sock),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("reference tracking: use ptr from bpf_skc_to_tcp_sock() after release")
+__failure __msg("invalid mem access")
+__naked void to_tcp_sock_after_release(void)
+{
+ asm volatile (
+ BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ call %[bpf_skc_to_tcp_sock]; \
+ if r0 != 0 goto l1_%=; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+ exit; \
+l1_%=: r7 = r0; \
+ r1 = r6; \
+ call %[bpf_sk_release]; \
+ r0 = *(u8*)(r7 + 0); \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm(bpf_skc_to_tcp_sock),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("reference tracking: try to leak released ptr reg")
+__success __failure_unpriv __msg_unpriv("R8 !read_ok")
+__retval(0)
+__naked void to_leak_released_ptr_reg(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ *(u32*)(r10 - 4) = r0; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r9 = r0; \
+ r0 = 0; \
+ r1 = %[map_ringbuf] ll; \
+ r2 = 8; \
+ r3 = 0; \
+ call %[bpf_ringbuf_reserve]; \
+ if r0 != 0 goto l1_%=; \
+ exit; \
+l1_%=: r8 = r0; \
+ r1 = r8; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard]; \
+ r0 = 0; \
+ *(u64*)(r9 + 0) = r8; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_ringbuf_discard),
+ __imm(bpf_ringbuf_reserve),
+ __imm_addr(map_array_48b),
+ __imm_addr(map_ringbuf)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
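
All of the ref_tracking tests revolve around one rule: a socket returned by bpf_sk_lookup_tcp()/bpf_skc_lookup_tcp() carries a reference that must be released on every path, and only the exact pointer that was acquired may be passed to bpf_sk_release(). A minimal sketch of the compliant pattern in plain C (illustrative only, not part of this patch):

SEC("tc")
int ref_tracking_demo(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	/* acquire: returns a referenced socket or NULL */
	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
	if (!sk)
		return 0;
	/* ... read-only use of sk ... */
	bpf_sk_release(sk);	/* release on every path that still holds the reference */
	return 0;
}

Overwriting the pointer, spilling it and returning, mangling it with pointer arithmetic, or tail-calling while it is held produces the "Unreleased reference" and related errors checked above.
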
diff --git a/tools/testing/selftests/bpf/progs/verifier_reg_equal.c b/tools/testing/selftests/bpf/progs/verifier_reg_equal.c
new file mode 100644
index 000000000000..dc1d8c30fb0e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_reg_equal.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("check w reg equal if r reg upper32 bits 0")
+__success
+__naked void subreg_equality_1(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ *(u64 *)(r10 - 8) = r0; \
+ r2 = *(u32 *)(r10 - 8); \
+ /* At this point upper 4-bytes of r2 are 0, \
+ * thus insn w3 = w2 should propagate reg id, \
+ * and w2 < 9 comparison would also propagate \
+ * the range for r3. \
+ */ \
+ w3 = w2; \
+ if w2 < 9 goto l0_%=; \
+ exit; \
+l0_%=: if r3 < 9 goto l1_%=; \
+ /* r1 read is illegal at this point */ \
+ r0 -= r1; \
+l1_%=: exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check w reg not equal if r reg upper32 bits not 0")
+__failure __msg("R1 !read_ok")
+__naked void subreg_equality_2(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ r2 = r0; \
+ /* Upper 4-bytes of r2 may not be 0, thus insn \
+ * w3 = w2 should not propagate reg id, and \
+ * w2 < 9 comparison should not propagate \
+ * the range for r3 either. \
+ */ \
+ w3 = w2; \
+ if w2 < 9 goto l0_%=; \
+ exit; \
+l0_%=: if r3 < 9 goto l1_%=; \
+ /* r1 read is illegal at this point */ \
+ r0 -= r1; \
+l1_%=: exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_regalloc.c b/tools/testing/selftests/bpf/progs/verifier_regalloc.c
new file mode 100644
index 000000000000..ee5ddea87c91
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_regalloc.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/regalloc.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct test_val);
+} map_hash_48b SEC(".maps");
+
+SEC("tracepoint")
+__description("regalloc basic")
+__success __flag(BPF_F_ANY_ALIGNMENT)
+__naked void regalloc_basic(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r7 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r2 = r0; \
+ if r0 s> 20 goto l0_%=; \
+ if r2 s< 0 goto l0_%=; \
+ r7 += r0; \
+ r7 += r2; \
+ r0 = *(u64*)(r7 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("regalloc negative")
+__failure __msg("invalid access to map value, value_size=48 off=48 size=1")
+__naked void regalloc_negative(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r7 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r2 = r0; \
+ if r0 s> 24 goto l0_%=; \
+ if r2 s< 0 goto l0_%=; \
+ r7 += r0; \
+ r7 += r2; \
+ r0 = *(u8*)(r7 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("regalloc src_reg mark")
+__success __flag(BPF_F_ANY_ALIGNMENT)
+__naked void regalloc_src_reg_mark(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r7 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r2 = r0; \
+ if r0 s> 20 goto l0_%=; \
+ r3 = 0; \
+ if r3 s>= r2 goto l0_%=; \
+ r7 += r0; \
+ r7 += r2; \
+ r0 = *(u64*)(r7 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("regalloc src_reg negative")
+__failure __msg("invalid access to map value, value_size=48 off=44 size=8")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void regalloc_src_reg_negative(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r7 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r2 = r0; \
+ if r0 s> 22 goto l0_%=; \
+ r3 = 0; \
+ if r3 s>= r2 goto l0_%=; \
+ r7 += r0; \
+ r7 += r2; \
+ r0 = *(u64*)(r7 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("regalloc and spill")
+__success __flag(BPF_F_ANY_ALIGNMENT)
+__naked void regalloc_and_spill(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r7 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r2 = r0; \
+ if r0 s> 20 goto l0_%=; \
+ /* r0 has upper bound that should propagate into r2 */\
+ *(u64*)(r10 - 8) = r2; /* spill r2 */ \
+ r0 = 0; \
+ r2 = 0; /* clear r0 and r2 */\
+ r3 = *(u64*)(r10 - 8); /* fill r3 */ \
+ if r0 s>= r3 goto l0_%=; \
+ /* r3 has lower and upper bounds */ \
+ r7 += r3; \
+ r0 = *(u64*)(r7 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("regalloc and spill negative")
+__failure __msg("invalid access to map value, value_size=48 off=48 size=8")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void regalloc_and_spill_negative(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r7 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r2 = r0; \
+ if r0 s> 48 goto l0_%=; \
+ /* r0 has upper bound that should propagate into r2 */\
+ *(u64*)(r10 - 8) = r2; /* spill r2 */ \
+ r0 = 0; \
+ r2 = 0; /* clear r0 and r2 */\
+ r3 = *(u64*)(r10 - 8); /* fill r3 */\
+ if r0 s>= r3 goto l0_%=; \
+ /* r3 has lower and upper bounds */ \
+ r7 += r3; \
+ r0 = *(u64*)(r7 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("regalloc three regs")
+__success __flag(BPF_F_ANY_ALIGNMENT)
+__naked void regalloc_three_regs(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r7 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r2 = r0; \
+ r4 = r2; \
+ if r0 s> 12 goto l0_%=; \
+ if r2 s< 0 goto l0_%=; \
+ r7 += r0; \
+ r7 += r2; \
+ r7 += r4; \
+ r0 = *(u64*)(r7 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("regalloc after call")
+__success __flag(BPF_F_ANY_ALIGNMENT)
+__naked void regalloc_after_call(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r7 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r8 = r0; \
+ r9 = r0; \
+ call regalloc_after_call__1; \
+ if r8 s> 20 goto l0_%=; \
+ if r9 s< 0 goto l0_%=; \
+ r7 += r8; \
+ r7 += r9; \
+ r0 = *(u64*)(r7 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void regalloc_after_call__1(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tracepoint")
+__description("regalloc in callee")
+__success __flag(BPF_F_ANY_ALIGNMENT)
+__naked void regalloc_in_callee(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r7 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = r0; \
+ r2 = r0; \
+ r3 = r7; \
+ call regalloc_in_callee__1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void regalloc_in_callee__1(void)
+{
+ asm volatile (" \
+ if r1 s> 20 goto l0_%=; \
+ if r2 s< 0 goto l0_%=; \
+ r3 += r1; \
+ r3 += r2; \
+ r0 = *(u64*)(r3 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tracepoint")
+__description("regalloc, spill, JEQ")
+__success
+__naked void regalloc_spill_jeq(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ *(u64*)(r10 - 8) = r0; /* spill r0 */ \
+ if r0 == 0 goto l0_%=; \
+l0_%=: /* The verifier will walk the rest twice with r0 == 0 and r0 == map_value */\
+ call %[bpf_get_prandom_u32]; \
+ r2 = r0; \
+ if r2 == 20 goto l1_%=; \
+l1_%=: /* The verifier will walk the rest two more times with r0 == 20 and r0 == unknown */\
+ r3 = *(u64*)(r10 - 8); /* fill r3 with map_value */\
+ if r3 == 0 goto l2_%=; /* skip ldx if map_value == NULL */\
+ /* Buggy verifier will think that r3 == 20 here */\
+ r0 = *(u64*)(r3 + 0); /* read from map_value */\
+l2_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
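
The regalloc tests all share one skeleton: a random value is copied into two registers, bounds are proven on one copy, and the map-value access goes through the other, so the test only passes if the verifier propagates the learned range between the copies. The equivalent safe pattern in C simply bounds the index before using it (a sketch against the same map_hash_48b/test_val layout declared above; not part of this patch):

SEC("tracepoint")
int regalloc_demo(void *ctx)
{
	long long key = 0;
	struct test_val *val;
	int idx;

	val = bpf_map_lookup_elem(&map_hash_48b, &key);
	if (!val)
		return 0;

	idx = bpf_get_prandom_u32();
	/* both a lower and an upper bound must be proven before the access */
	if (idx < 0 || idx >= MAX_ENTRIES)
		return 0;
	return val->foo[idx];
}
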
diff --git a/tools/testing/selftests/bpf/progs/verifier_ringbuf.c b/tools/testing/selftests/bpf/progs/verifier_ringbuf.c
new file mode 100644
index 000000000000..ae1d521f326c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_ringbuf.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/ringbuf.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096);
+} map_ringbuf SEC(".maps");
+
+SEC("socket")
+__description("ringbuf: invalid reservation offset 1")
+__failure __msg("R1 must have zero offset when passed to release func")
+__failure_unpriv
+__naked void ringbuf_invalid_reservation_offset_1(void)
+{
+ asm volatile (" \
+ /* reserve 8 byte ringbuf memory */ \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r1 = %[map_ringbuf] ll; \
+ r2 = 8; \
+ r3 = 0; \
+ call %[bpf_ringbuf_reserve]; \
+ /* store a pointer to the reserved memory in R6 */\
+ r6 = r0; \
+ /* check whether the reservation was successful */\
+ if r0 == 0 goto l0_%=; \
+ /* spill R6(mem) into the stack */ \
+ *(u64*)(r10 - 8) = r6; \
+ /* fill it back in R7 */ \
+ r7 = *(u64*)(r10 - 8); \
+ /* should be able to access *(R7) = 0 */ \
+ r1 = 0; \
+ *(u64*)(r7 + 0) = r1; \
+ /* submit the reserved ringbuf memory */ \
+ r1 = r7; \
+ /* add invalid offset to reserved ringbuf memory */\
+ r1 += 0xcafe; \
+ r2 = 0; \
+ call %[bpf_ringbuf_submit]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ringbuf_reserve),
+ __imm(bpf_ringbuf_submit),
+ __imm_addr(map_ringbuf)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("ringbuf: invalid reservation offset 2")
+__failure __msg("R7 min value is outside of the allowed memory range")
+__failure_unpriv
+__naked void ringbuf_invalid_reservation_offset_2(void)
+{
+ asm volatile (" \
+ /* reserve 8 byte ringbuf memory */ \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r1 = %[map_ringbuf] ll; \
+ r2 = 8; \
+ r3 = 0; \
+ call %[bpf_ringbuf_reserve]; \
+ /* store a pointer to the reserved memory in R6 */\
+ r6 = r0; \
+ /* check whether the reservation was successful */\
+ if r0 == 0 goto l0_%=; \
+ /* spill R6(mem) into the stack */ \
+ *(u64*)(r10 - 8) = r6; \
+ /* fill it back in R7 */ \
+ r7 = *(u64*)(r10 - 8); \
+ /* add invalid offset to reserved ringbuf memory */\
+ r7 += 0xcafe; \
+ /* should be able to access *(R7) = 0 */ \
+ r1 = 0; \
+ *(u64*)(r7 + 0) = r1; \
+ /* submit the reserved ringbuf memory */ \
+ r1 = r7; \
+ r2 = 0; \
+ call %[bpf_ringbuf_submit]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ringbuf_reserve),
+ __imm(bpf_ringbuf_submit),
+ __imm_addr(map_ringbuf)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("ringbuf: check passing rb mem to helpers")
+__success __retval(0)
+__naked void passing_rb_mem_to_helpers(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ /* reserve 8 byte ringbuf memory */ \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r1 = %[map_ringbuf] ll; \
+ r2 = 8; \
+ r3 = 0; \
+ call %[bpf_ringbuf_reserve]; \
+ r7 = r0; \
+ /* check whether the reservation was successful */\
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: /* pass allocated ring buffer memory to fib lookup */\
+ r1 = r6; \
+ r2 = r0; \
+ r3 = 8; \
+ r4 = 0; \
+ call %[bpf_fib_lookup]; \
+ /* submit the ringbuf memory */ \
+ r1 = r7; \
+ r2 = 0; \
+ call %[bpf_ringbuf_submit]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_fib_lookup),
+ __imm(bpf_ringbuf_reserve),
+ __imm(bpf_ringbuf_submit),
+ __imm_addr(map_ringbuf)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
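
The ringbuf tests check that the pointer returned by bpf_ringbuf_reserve() is written through and released at its original offset; the two failure cases above shift it first. The valid shape in plain C (a sketch using the map_ringbuf declared above; not part of this patch):

SEC("xdp")
int ringbuf_demo(struct xdp_md *ctx)
{
	__u64 *rec;

	/* reserve 8 bytes; NULL means the ring is full */
	rec = bpf_ringbuf_reserve(&map_ringbuf, sizeof(*rec), 0);
	if (!rec)
		return XDP_PASS;

	*rec = 42;
	/* submit (or bpf_ringbuf_discard()) the pointer exactly as returned */
	bpf_ringbuf_submit(rec, 0);
	return XDP_PASS;
}
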
diff --git a/tools/testing/selftests/bpf/progs/verifier_runtime_jit.c b/tools/testing/selftests/bpf/progs/verifier_runtime_jit.c
new file mode 100644
index 000000000000..27ebfc1fd9ee
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_runtime_jit.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/runtime_jit.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+void dummy_prog_42_socket(void);
+void dummy_prog_24_socket(void);
+void dummy_prog_loop1_socket(void);
+void dummy_prog_loop2_socket(void);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 4);
+ __uint(key_size, sizeof(int));
+ __array(values, void (void));
+} map_prog1_socket SEC(".maps") = {
+ .values = {
+ [0] = (void *)&dummy_prog_42_socket,
+ [1] = (void *)&dummy_prog_loop1_socket,
+ [2] = (void *)&dummy_prog_24_socket,
+ },
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 8);
+ __uint(key_size, sizeof(int));
+ __array(values, void (void));
+} map_prog2_socket SEC(".maps") = {
+ .values = {
+ [1] = (void *)&dummy_prog_loop2_socket,
+ [2] = (void *)&dummy_prog_24_socket,
+ [7] = (void *)&dummy_prog_42_socket,
+ },
+};
+
+SEC("socket")
+__auxiliary __auxiliary_unpriv
+__naked void dummy_prog_42_socket(void)
+{
+ asm volatile ("r0 = 42; exit;");
+}
+
+SEC("socket")
+__auxiliary __auxiliary_unpriv
+__naked void dummy_prog_24_socket(void)
+{
+ asm volatile ("r0 = 24; exit;");
+}
+
+SEC("socket")
+__auxiliary __auxiliary_unpriv
+__naked void dummy_prog_loop1_socket(void)
+{
+ asm volatile (" \
+ r3 = 1; \
+ r2 = %[map_prog1_socket] ll; \
+ call %[bpf_tail_call]; \
+ r0 = 41; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket)
+ : __clobber_all);
+}
+
+SEC("socket")
+__auxiliary __auxiliary_unpriv
+__naked void dummy_prog_loop2_socket(void)
+{
+ asm volatile (" \
+ r3 = 1; \
+ r2 = %[map_prog2_socket] ll; \
+ call %[bpf_tail_call]; \
+ r0 = 41; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog2_socket)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("runtime/jit: tail_call within bounds, prog once")
+__success __success_unpriv __retval(42)
+__naked void call_within_bounds_prog_once(void)
+{
+ asm volatile (" \
+ r3 = 0; \
+ r2 = %[map_prog1_socket] ll; \
+ call %[bpf_tail_call]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("runtime/jit: tail_call within bounds, prog loop")
+__success __success_unpriv __retval(41)
+__naked void call_within_bounds_prog_loop(void)
+{
+ asm volatile (" \
+ r3 = 1; \
+ r2 = %[map_prog1_socket] ll; \
+ call %[bpf_tail_call]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("runtime/jit: tail_call within bounds, no prog")
+__success __success_unpriv __retval(1)
+__naked void call_within_bounds_no_prog(void)
+{
+ asm volatile (" \
+ r3 = 3; \
+ r2 = %[map_prog1_socket] ll; \
+ call %[bpf_tail_call]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("runtime/jit: tail_call within bounds, key 2")
+__success __success_unpriv __retval(24)
+__naked void call_within_bounds_key_2(void)
+{
+ asm volatile (" \
+ r3 = 2; \
+ r2 = %[map_prog1_socket] ll; \
+ call %[bpf_tail_call]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("runtime/jit: tail_call within bounds, key 2 / key 2, first branch")
+__success __success_unpriv __retval(24)
+__naked void _2_key_2_first_branch(void)
+{
+ asm volatile (" \
+ r0 = 13; \
+ *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \
+ r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \
+ if r0 == 13 goto l0_%=; \
+ r3 = 2; \
+ r2 = %[map_prog1_socket] ll; \
+ goto l1_%=; \
+l0_%=: r3 = 2; \
+ r2 = %[map_prog1_socket] ll; \
+l1_%=: call %[bpf_tail_call]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket),
+ __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("runtime/jit: tail_call within bounds, key 2 / key 2, second branch")
+__success __success_unpriv __retval(24)
+__naked void _2_key_2_second_branch(void)
+{
+ asm volatile (" \
+ r0 = 14; \
+ *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \
+ r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \
+ if r0 == 13 goto l0_%=; \
+ r3 = 2; \
+ r2 = %[map_prog1_socket] ll; \
+ goto l1_%=; \
+l0_%=: r3 = 2; \
+ r2 = %[map_prog1_socket] ll; \
+l1_%=: call %[bpf_tail_call]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket),
+ __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("runtime/jit: tail_call within bounds, key 0 / key 2, first branch")
+__success __success_unpriv __retval(24)
+__naked void _0_key_2_first_branch(void)
+{
+ asm volatile (" \
+ r0 = 13; \
+ *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \
+ r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \
+ if r0 == 13 goto l0_%=; \
+ r3 = 0; \
+ r2 = %[map_prog1_socket] ll; \
+ goto l1_%=; \
+l0_%=: r3 = 2; \
+ r2 = %[map_prog1_socket] ll; \
+l1_%=: call %[bpf_tail_call]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket),
+ __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("runtime/jit: tail_call within bounds, key 0 / key 2, second branch")
+__success __success_unpriv __retval(42)
+__naked void _0_key_2_second_branch(void)
+{
+ asm volatile (" \
+ r0 = 14; \
+ *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \
+ r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \
+ if r0 == 13 goto l0_%=; \
+ r3 = 0; \
+ r2 = %[map_prog1_socket] ll; \
+ goto l1_%=; \
+l0_%=: r3 = 2; \
+ r2 = %[map_prog1_socket] ll; \
+l1_%=: call %[bpf_tail_call]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket),
+ __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("runtime/jit: tail_call within bounds, different maps, first branch")
+__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr")
+__retval(1)
+__naked void bounds_different_maps_first_branch(void)
+{
+ asm volatile (" \
+ r0 = 13; \
+ *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \
+ r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \
+ if r0 == 13 goto l0_%=; \
+ r3 = 0; \
+ r2 = %[map_prog1_socket] ll; \
+ goto l1_%=; \
+l0_%=: r3 = 0; \
+ r2 = %[map_prog2_socket] ll; \
+l1_%=: call %[bpf_tail_call]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket),
+ __imm_addr(map_prog2_socket),
+ __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("runtime/jit: tail_call within bounds, different maps, second branch")
+__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr")
+__retval(42)
+__naked void bounds_different_maps_second_branch(void)
+{
+ asm volatile (" \
+ r0 = 14; \
+ *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \
+ r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \
+ if r0 == 13 goto l0_%=; \
+ r3 = 0; \
+ r2 = %[map_prog1_socket] ll; \
+ goto l1_%=; \
+l0_%=: r3 = 0; \
+ r2 = %[map_prog2_socket] ll; \
+l1_%=: call %[bpf_tail_call]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket),
+ __imm_addr(map_prog2_socket),
+ __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("runtime/jit: tail_call out of bounds")
+__success __success_unpriv __retval(2)
+__naked void tail_call_out_of_bounds(void)
+{
+ asm volatile (" \
+ r3 = 256; \
+ r2 = %[map_prog1_socket] ll; \
+ call %[bpf_tail_call]; \
+ r0 = 2; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("runtime/jit: pass negative index to tail_call")
+__success __success_unpriv __retval(2)
+__naked void negative_index_to_tail_call(void)
+{
+ asm volatile (" \
+ r3 = -1; \
+ r2 = %[map_prog1_socket] ll; \
+ call %[bpf_tail_call]; \
+ r0 = 2; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("runtime/jit: pass > 32bit index to tail_call")
+__success __success_unpriv __retval(42)
+/* Verifier rewrite for unpriv skips tail call here. */
+__retval_unpriv(2)
+__naked void _32bit_index_to_tail_call(void)
+{
+ asm volatile (" \
+ r3 = 0x100000000 ll; \
+ r2 = %[map_prog1_socket] ll; \
+ call %[bpf_tail_call]; \
+ r0 = 2; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
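
The runtime_jit tests above encode the contract the JIT has to honour for bpf_tail_call(): a populated in-range slot transfers control and never returns, while an empty slot, an out-of-range index or a negative index simply falls through to the next instruction (indices are truncated to 32 bits first, as the last test shows). Expressed as ordinary BPF C (sketch only, names are illustrative), the fall-through behaviour the __retval() annotations rely on looks like this:

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("socket")
int dispatch(struct __sk_buff *skb)
{
	/* On success control transfers to the program stored at index 0
	 * and this call does not return.
	 */
	bpf_tail_call(skb, &jmp_table, 0);
	/* Reached only when the slot is empty or the index is out of range. */
	return 1;
}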
diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
new file mode 100644
index 000000000000..7c5e5e6d10eb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
@@ -0,0 +1,830 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+/* Check that precision marks propagate through scalar IDs.
+ * Registers r{0,1,2} have the same scalar ID.
+ * Range information is propagated for scalars sharing same ID.
+ * Check that precision mark for r0 causes precision marks for r{1,2}
+ * when range information is propagated for 'if <reg> <op> <const>' insn.
+ */
+SEC("socket")
+__success __log_level(2)
+/* first 'if' branch */
+__msg("6: (0f) r3 += r0")
+__msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0")
+__msg("frame0: parent state regs=r0,r1,r2 stack=:")
+__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
+/* second 'if' branch */
+__msg("from 4 to 5: ")
+__msg("6: (0f) r3 += r0")
+__msg("frame0: regs=r0 stack= before 5: (bf) r3 = r10")
+__msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0")
+/* parent state already has r{0,1,2} as precise */
+__msg("frame0: parent state regs= stack=:")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void linked_regs_bpf_k(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r0.id == r1.id == r2.id */
+ "r1 = r0;"
+ "r2 = r0;"
+ "if r1 > 7 goto +0;"
+ /* force r0 to be precise, this eventually marks r1 and r2 as
+ * precise as well because of shared IDs
+ */
+ "r3 = r10;"
+ "r3 += r0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Registers r{0,1,2} share same ID when 'if r1 > ...' insn is processed,
+ * check that verifier marks r{1,2} as precise while backtracking
+ * 'if r1 > ...' with r0 already marked.
+ */
+SEC("socket")
+__success __log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+__msg("frame0: regs=r0 stack= before 5: (2d) if r1 > r3 goto pc+0")
+__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:")
+__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7")
+__naked void linked_regs_bpf_x_src(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r0.id == r1.id == r2.id */
+ "r1 = r0;"
+ "r2 = r0;"
+ "r3 = 7;"
+ "if r1 > r3 goto +0;"
+ /* force r0 to be precise, this eventually marks r1 and r2 as
+ * precise as well because of shared IDs
+ */
+ "r4 = r10;"
+ "r4 += r0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Registers r{0,1,2} share same ID when 'if r1 > r3' insn is processed,
+ * check that verifier marks r{0,1,2} as precise while backtracking
+ * 'if r1 > r3' with r3 already marked.
+ */
+SEC("socket")
+__success __log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+__msg("frame0: regs=r3 stack= before 5: (2d) if r1 > r3 goto pc+0")
+__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:")
+__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7")
+__naked void linked_regs_bpf_x_dst(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r0.id == r1.id == r2.id */
+ "r1 = r0;"
+ "r2 = r0;"
+ "r3 = 7;"
+ "if r1 > r3 goto +0;"
+ /* force r0 to be precise, this eventually marks r1 and r2 as
+ * precise as well because of shared IDs
+ */
+ "r4 = r10;"
+ "r4 += r3;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Same as linked_regs_bpf_k, but break one of the
+ * links, note that r1 is absent from regs=... in __msg below.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("7: (0f) r3 += r0")
+__msg("frame0: regs=r0 stack= before 6: (bf) r3 = r10")
+__msg("frame0: parent state regs=r0 stack=:")
+__msg("frame0: regs=r0 stack= before 5: (25) if r0 > 0x7 goto pc+0")
+__msg("frame0: parent state regs=r0,r2 stack=:")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void linked_regs_broken_link(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r0.id == r1.id == r2.id */
+ "r1 = r0;"
+ "r2 = r0;"
+ /* break link for r1, this is the only line that differs
+ * compared to the previous test
+ */
+ "r1 = 0;"
+ "if r0 > 7 goto +0;"
+ /* force r0 to be precise,
+ * this eventually marks r2 as precise because of shared IDs
+ */
+ "r3 = r10;"
+ "r3 += r0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Check that precision marks propagate through scalar IDs.
+ * Use the same scalar ID in multiple stack frames, check that
+ * precision information is propagated up the call stack.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("12: (0f) r2 += r1")
+/* Current state */
+__msg("frame2: last_idx 12 first_idx 11 subseq_idx -1 ")
+__msg("frame2: regs=r1 stack= before 11: (bf) r2 = r10")
+__msg("frame2: parent state regs=r1 stack=")
+__msg("frame1: parent state regs= stack=")
+__msg("frame0: parent state regs= stack=")
+/* Parent state */
+__msg("frame2: last_idx 10 first_idx 10 subseq_idx 11 ")
+__msg("frame2: regs=r1 stack= before 10: (25) if r1 > 0x7 goto pc+0")
+__msg("frame2: parent state regs=r1 stack=")
+/* frame1.r{6,7} are marked because mark_precise_scalar_ids()
+ * looks for all registers with frame2.r1.id in the current state
+ */
+__msg("frame1: parent state regs=r6,r7 stack=")
+__msg("frame0: parent state regs=r6 stack=")
+/* Parent state */
+__msg("frame2: last_idx 8 first_idx 8 subseq_idx 10")
+__msg("frame2: regs=r1 stack= before 8: (85) call pc+1")
+/* frame1.r1 is marked because of backtracking of call instruction */
+__msg("frame1: parent state regs=r1,r6,r7 stack=")
+__msg("frame0: parent state regs=r6 stack=")
+/* Parent state */
+__msg("frame1: last_idx 7 first_idx 6 subseq_idx 8")
+__msg("frame1: regs=r1,r6,r7 stack= before 7: (bf) r7 = r1")
+__msg("frame1: regs=r1,r6 stack= before 6: (bf) r6 = r1")
+__msg("frame1: parent state regs=r1 stack=")
+__msg("frame0: parent state regs=r6 stack=")
+/* Parent state */
+__msg("frame1: last_idx 4 first_idx 4 subseq_idx 6")
+__msg("frame1: regs=r1 stack= before 4: (85) call pc+1")
+__msg("frame0: parent state regs=r1,r6 stack=")
+/* Parent state */
+__msg("frame0: last_idx 3 first_idx 1 subseq_idx 4")
+__msg("frame0: regs=r1,r6 stack= before 3: (bf) r6 = r0")
+__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
+__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void precision_many_frames(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r0.id == r1.id == r6.id */
+ "r1 = r0;"
+ "r6 = r0;"
+ "call precision_many_frames__foo;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+static __naked __noinline __used
+void precision_many_frames__foo(void)
+{
+ asm volatile (
+ /* conflate one of the register numbers (r6) with outer frame,
+ * to verify that those are tracked independently
+ */
+ "r6 = r1;"
+ "r7 = r1;"
+ "call precision_many_frames__bar;"
+ "exit"
+ ::: __clobber_all);
+}
+
+static __naked __noinline __used
+void precision_many_frames__bar(void)
+{
+ asm volatile (
+ "if r1 > 7 goto +0;"
+ /* force r1 to be precise, this eventually marks:
+ * - bar frame r1
+ * - foo frame r{1,6,7}
+ * - main frame r{1,6}
+ */
+ "r2 = r10;"
+ "r2 += r1;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Check that scalars with the same IDs are marked precise on stack as
+ * well as in registers.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("11: (0f) r2 += r1")
+/* foo frame */
+__msg("frame1: regs=r1 stack= before 10: (bf) r2 = r10")
+__msg("frame1: regs=r1 stack= before 9: (25) if r1 > 0x7 goto pc+0")
+__msg("frame1: regs=r1 stack=-8,-16 before 8: (7b) *(u64 *)(r10 -16) = r1")
+__msg("frame1: regs=r1 stack=-8 before 7: (7b) *(u64 *)(r10 -8) = r1")
+__msg("frame1: regs=r1 stack= before 4: (85) call pc+2")
+/* main frame */
+__msg("frame0: regs=r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1")
+__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0")
+__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void precision_stack(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r0.id == r1.id == fp[-8].id */
+ "r1 = r0;"
+ "*(u64*)(r10 - 8) = r1;"
+ "call precision_stack__foo;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+static __naked __noinline __used
+void precision_stack__foo(void)
+{
+ asm volatile (
+ /* conflate one of the stack slots (fp-8) with the outer frame,
+ * to verify that those are tracked independently
+ */
+ "*(u64*)(r10 - 8) = r1;"
+ "*(u64*)(r10 - 16) = r1;"
+ "if r1 > 7 goto +0;"
+ /* force r1 to be precise, this eventually marks:
+ * - foo frame r1,fp{-8,-16}
+ * - main frame r1,fp{-8}
+ */
+ "r2 = r10;"
+ "r2 += r1;"
+ "exit"
+ ::: __clobber_all);
+}
+
+/* Use two separate scalar IDs to check that these are propagated
+ * independently.
+ */
+SEC("socket")
+__success __log_level(2)
+/* r{6,7} */
+__msg("12: (0f) r3 += r7")
+__msg("frame0: regs=r7 stack= before 11: (bf) r3 = r10")
+__msg("frame0: regs=r7 stack= before 9: (25) if r7 > 0x7 goto pc+0")
+/* ... skip some insns ... */
+__msg("frame0: regs=r6,r7 stack= before 3: (bf) r7 = r0")
+__msg("frame0: regs=r0,r6 stack= before 2: (bf) r6 = r0")
+/* r{8,9} */
+__msg("13: (0f) r3 += r9")
+__msg("frame0: regs=r9 stack= before 12: (0f) r3 += r7")
+/* ... skip some insns ... */
+__msg("frame0: regs=r9 stack= before 10: (25) if r9 > 0x7 goto pc+0")
+__msg("frame0: regs=r8,r9 stack= before 7: (bf) r9 = r0")
+__msg("frame0: regs=r0,r8 stack= before 6: (bf) r8 = r0")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void precision_two_ids(void)
+{
+ asm volatile (
+ /* r6 = random number up to 0xff
+ * r6.id == r7.id
+ */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ "r6 = r0;"
+ "r7 = r0;"
+ /* same, but for r{8,9} */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ "r8 = r0;"
+ "r9 = r0;"
+ /* clear r0 id */
+ "r0 = 0;"
+ /* propagate equal scalars precision */
+ "if r7 > 7 goto +0;"
+ "if r9 > 7 goto +0;"
+ "r3 = r10;"
+ /* force r7 to be precise, this also marks r6 */
+ "r3 += r7;"
+ /* force r9 to be precise, this also marks r8 */
+ "r3 += r9;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__success __log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+/* check that r0 and r6 have different IDs after 'if',
+ * collect_linked_regs() can't tie more than 6 registers for a single insn.
+ */
+__msg("8: (25) if r0 > 0x7 goto pc+0 ; R0=scalar(id=1")
+__msg("9: (bf) r6 = r6 ; R6_w=scalar(id=2")
+/* check that r{0-5} are marked precise after 'if' */
+__msg("frame0: regs=r0 stack= before 8: (25) if r0 > 0x7 goto pc+0")
+__msg("frame0: parent state regs=r0,r1,r2,r3,r4,r5 stack=:")
+__naked void linked_regs_too_many_regs(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r{0-6} IDs */
+ "r1 = r0;"
+ "r2 = r0;"
+ "r3 = r0;"
+ "r4 = r0;"
+ "r5 = r0;"
+ "r6 = r0;"
+ /* propagate range for r{0-6} */
+ "if r0 > 7 goto +0;"
+ /* make r6 appear in the log */
+ "r6 = r6;"
+ /* force r0 to be precise,
+ * this would cause r{0-5} to be precise because of shared IDs
+ */
+ "r7 = r10;"
+ "r7 += r0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__failure __log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+__msg("regs=r7 stack= before 5: (3d) if r8 >= r0")
+__msg("parent state regs=r0,r7,r8")
+__msg("regs=r0,r7,r8 stack= before 4: (25) if r0 > 0x1")
+__msg("div by zero")
+__naked void linked_regs_broken_link_2(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r8 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 > 1 goto +0;"
+ /* r7.id == r8.id,
+ * thus r7 precision implies r8 precision,
+ * which implies r0 precision because of the conditional below.
+ */
+ "if r8 >= r0 goto 1f;"
+ /* break id relation between r7 and r8 */
+ "r8 += r8;"
+ /* make r7 precise */
+ "if r7 == 0 goto 1f;"
+ "r0 /= 0;"
+"1:"
+ "r0 = 42;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/* Check that mark_chain_precision() for one of the conditional jump
+ * operands does not trigger equal scalars precision propagation.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("3: (25) if r1 > 0x100 goto pc+0")
+__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0")
+__naked void cjmp_no_linked_regs_trigger(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r0.id == r1.id */
+ "r1 = r0;"
+ /* the jump below would be predicted, thus r1 would be marked precise,
+ * this should not imply precision mark for r0
+ */
+ "if r1 > 256 goto +0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Verify that check_ids() is used by regsafe() for scalars.
+ *
+ * r9 = ... some pointer with range X ...
+ * r6 = ... unbound scalar ID=a ...
+ * r7 = ... unbound scalar ID=b ...
+ * if (r6 > r7) goto +1
+ * r7 = r6
+ * if (r7 > X) goto exit
+ * r9 += r6
+ * ... access memory using r9 ...
+ *
+ * The memory access is safe only if r7 is bounded,
+ * which is true for one branch and not true for another.
+ */
+SEC("socket")
+__failure __msg("register with unbounded min value")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void check_ids_in_regsafe(void)
+{
+ asm volatile (
+ /* Bump allocated stack */
+ "r1 = 0;"
+ "*(u64*)(r10 - 8) = r1;"
+ /* r9 = pointer to stack */
+ "r9 = r10;"
+ "r9 += -8;"
+ /* r7 = ktime_get_ns() */
+ "call %[bpf_ktime_get_ns];"
+ "r7 = r0;"
+ /* r6 = ktime_get_ns() */
+ "call %[bpf_ktime_get_ns];"
+ "r6 = r0;"
+ /* if r6 > r7 is an unpredictable jump */
+ "if r6 > r7 goto l1_%=;"
+ "r7 = r6;"
+"l1_%=:"
+ /* if r7 > 4 ...; transfers range to r6 on one execution path
+ * but does not transfer on another
+ */
+ "if r7 > 4 goto l2_%=;"
+ /* Access memory at r9[r6], r6 is not always bounded */
+ "r9 += r6;"
+ "r0 = *(u8*)(r9 + 0);"
+"l2_%=:"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Similar to check_ids_in_regsafe.
+ * The l0 could be reached in two states:
+ *
+ * (1) r6{.id=A}, r7{.id=A}, r8{.id=B}
+ * (2) r6{.id=B}, r7{.id=A}, r8{.id=B}
+ *
+ * Where (2) is not safe, as "r7 > 4" check won't propagate range for it.
+ * This example would be considered safe without changes to
+ * mark_chain_precision() to track scalar values with equal IDs.
+ */
+SEC("socket")
+__failure __msg("register with unbounded min value")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void check_ids_in_regsafe_2(void)
+{
+ asm volatile (
+ /* Bump allocated stack */
+ "r1 = 0;"
+ "*(u64*)(r10 - 8) = r1;"
+ /* r9 = pointer to stack */
+ "r9 = r10;"
+ "r9 += -8;"
+ /* r8 = ktime_get_ns() */
+ "call %[bpf_ktime_get_ns];"
+ "r8 = r0;"
+ /* r7 = ktime_get_ns() */
+ "call %[bpf_ktime_get_ns];"
+ "r7 = r0;"
+ /* r6 = ktime_get_ns() */
+ "call %[bpf_ktime_get_ns];"
+ "r6 = r0;"
+ /* scratch .id from r0 */
+ "r0 = 0;"
+ /* if r6 > r7 is an unpredictable jump */
+ "if r6 > r7 goto l1_%=;"
+ /* tie r6 and r7 .id */
+ "r6 = r7;"
+"l0_%=:"
+ /* if r7 > 4 exit(0) */
+ "if r7 > 4 goto l2_%=;"
+ /* Access memory at r9[r6] */
+ "r9 += r6;"
+ "r0 = *(u8*)(r9 + 0);"
+"l2_%=:"
+ "r0 = 0;"
+ "exit;"
+"l1_%=:"
+ /* tie r6 and r8 .id */
+ "r6 = r8;"
+ "goto l0_%=;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Check that scalar IDs *are not* generated on register to register
+ * assignments if source register is a constant.
+ *
+ * If such IDs *are* generated the 'l1' below would be reached in
+ * two states:
+ *
+ * (1) r1{.id=A}, r2{.id=A}
+ * (2) r1{.id=C}, r2{.id=C}
+ *
+ * Thus forcing 'if r1 == r2' verification twice.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("11: (1d) if r3 == r4 goto pc+0")
+__msg("frame 0: propagating r3,r4")
+__msg("11: safe")
+__msg("processed 15 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void no_scalar_id_for_const(void)
+{
+ asm volatile (
+ "call %[bpf_ktime_get_ns];"
+ /* unpredictable jump */
+ "if r0 > 7 goto l0_%=;"
+ /* possibly generate same scalar ids for r3 and r4 */
+ "r1 = 0;"
+ "r1 = r1;"
+ "r3 = r1;"
+ "r4 = r1;"
+ "goto l1_%=;"
+"l0_%=:"
+ /* possibly generate different scalar ids for r3 and r4 */
+ "r1 = 0;"
+ "r2 = 0;"
+ "r3 = r1;"
+ "r4 = r2;"
+"l1_%=:"
+ /* predictable jump, marks r3 and r4 precise */
+ "if r3 == r4 goto +0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Same as no_scalar_id_for_const() but for 32-bit values */
+SEC("socket")
+__success __log_level(2)
+__msg("11: (1e) if w3 == w4 goto pc+0")
+__msg("frame 0: propagating r3,r4")
+__msg("11: safe")
+__msg("processed 15 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void no_scalar_id_for_const32(void)
+{
+ asm volatile (
+ "call %[bpf_ktime_get_ns];"
+ /* unpredictable jump */
+ "if r0 > 7 goto l0_%=;"
+ /* possibly generate same scalar ids for r3 and r4 */
+ "w1 = 0;"
+ "w1 = w1;"
+ "w3 = w1;"
+ "w4 = w1;"
+ "goto l1_%=;"
+"l0_%=:"
+ /* possibly generate different scalar ids for r3 and r4 */
+ "w1 = 0;"
+ "w2 = 0;"
+ "w3 = w1;"
+ "w4 = w2;"
+"l1_%=:"
+ /* predictable jump, marks r3 and r4 precise */
+ "if w3 == w4 goto +0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Check that unique scalar IDs are ignored when new verifier state is
+ * compared to cached verifier state. For this test:
+ * - cached state has no id on r1
+ * - new state has a unique id on r1
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("6: (25) if r6 > 0x7 goto pc+1")
+__msg("7: (57) r1 &= 255")
+__msg("8: (bf) r2 = r10")
+__msg("from 6 to 8: safe")
+__msg("processed 12 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void ignore_unique_scalar_ids_cur(void)
+{
+ asm volatile (
+ "call %[bpf_ktime_get_ns];"
+ "r6 = r0;"
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* r1.id == r0.id */
+ "r1 = r0;"
+ /* make r1.id unique */
+ "r0 = 0;"
+ "if r6 > 7 goto l0_%=;"
+ /* clear r1 id, but keep the range compatible */
+ "r1 &= 0xff;"
+"l0_%=:"
+ /* get here in two states:
+ * - first: r1 has no id (cached state)
+ * - second: r1 has a unique id (should be considered equivalent)
+ */
+ "r2 = r10;"
+ "r2 += r1;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Check that unique scalar IDs are ignored when new verifier state is
+ * compared to cached verifier state. For this test:
+ * - cached state has a unique id on r1
+ * - new state has no id on r1
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("6: (25) if r6 > 0x7 goto pc+1")
+__msg("7: (05) goto pc+1")
+__msg("9: (bf) r2 = r10")
+__msg("9: safe")
+__msg("processed 13 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void ignore_unique_scalar_ids_old(void)
+{
+ asm volatile (
+ "call %[bpf_ktime_get_ns];"
+ "r6 = r0;"
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* r1.id == r0.id */
+ "r1 = r0;"
+ /* make r1.id unique */
+ "r0 = 0;"
+ "if r6 > 7 goto l1_%=;"
+ "goto l0_%=;"
+"l1_%=:"
+ /* clear r1 id, but keep the range compatible */
+ "r1 &= 0xff;"
+"l0_%=:"
+ /* get here in two states:
+ * - first: r1 has a unique id (cached state)
+ * - second: r1 has no id (should be considered equivalent)
+ */
+ "r2 = r10;"
+ "r2 += r1;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Check that two different scalar IDs in a verified state can't be
+ * mapped to the same scalar ID in current state.
+ */
+SEC("socket")
+__success __log_level(2)
+/* The exit instruction should be reachable from two states,
+ * use two matches and "processed .. insns" to ensure this.
+ */
+__msg("13: (95) exit")
+__msg("13: (95) exit")
+__msg("processed 18 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void two_old_ids_one_cur_id(void)
+{
+ asm volatile (
+ /* Give unique scalar IDs to r{6,7} */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ "r6 = r0;"
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ "r7 = r0;"
+ "r0 = 0;"
+ /* Maybe make r{6,7} IDs identical */
+ "if r6 > r7 goto l0_%=;"
+ "goto l1_%=;"
+"l0_%=:"
+ "r6 = r7;"
+"l1_%=:"
+ /* Mark r{6,7} precise.
+ * Get here in two states:
+ * - first: r6{.id=A}, r7{.id=B} (cached state)
+ * - second: r6{.id=A}, r7{.id=A}
+ * Currently we don't want to consider such states equivalent.
+ * Thus "exit;" would be verified twice.
+ */
+ "r2 = r10;"
+ "r2 += r6;"
+ "r2 += r7;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+/* Note the flag, see verifier.c:opt_subreg_zext_lo32_rnd_hi32() */
+__flag(BPF_F_TEST_RND_HI32)
+__success
+/* This test was added because of a bug in verifier.c:sync_linked_regs(),
+ * upon range propagation it destroyed subreg_def marks for registers.
+ * The subreg_def mark is used to decide whether zero extension instructions
+ * are needed when register is read. When BPF_F_TEST_RND_HI32 is set it
+ * also causes generation of statements to randomize upper halves of
+ * read registers.
+ *
+ * The test is written in a way to return an upper half of a register
+ * that is affected by range propagation and must have its subreg_def
+ * preserved. This gives a return value of 0 and leads to undefined
+ * return value if subreg_def mark is not preserved.
+ */
+__retval(0)
+/* Check that verifier believes r1/r0 are zero at exit */
+__log_level(2)
+__msg("4: (77) r1 >>= 32 ; R1_w=0")
+__msg("5: (bf) r0 = r1 ; R0_w=0 R1_w=0")
+__msg("6: (95) exit")
+__msg("from 3 to 4")
+__msg("4: (77) r1 >>= 32 ; R1_w=0")
+__msg("5: (bf) r0 = r1 ; R0_w=0 R1_w=0")
+__msg("6: (95) exit")
+/* Verify that statements to randomize upper half of r1 had not been
+ * generated.
+ */
+__xlated("call unknown")
+__xlated("r0 &= 2147483647")
+__xlated("w1 = w0")
+/* This is how disasm.c prints BPF_ZEXT_REG at the moment, x86 and arm64
+ * are the only CI archs that do not need zero extension for subregs.
+ */
+#if !defined(__TARGET_ARCH_x86) && !defined(__TARGET_ARCH_arm64)
+__xlated("w1 = w1")
+#endif
+__xlated("if w0 < 0xa goto pc+0")
+__xlated("r1 >>= 32")
+__xlated("r0 = r1")
+__xlated("exit")
+__naked void linked_regs_and_subreg_def(void)
+{
+ asm volatile (
+ "call %[bpf_ktime_get_ns];"
+ /* make sure r0 is in 32-bit range, otherwise w1 = w0 won't
+ * assign same IDs to registers.
+ */
+ "r0 &= 0x7fffffff;"
+ /* link w1 and w0 via ID */
+ "w1 = w0;"
+ /* 'if' statement propagates range info from w0 to w1,
+ * but should not affect w1->subreg_def property.
+ */
+ "if w0 < 10 goto +0;"
+ /* r1 is read here, on archs that require subreg zero
+ * extension this would cause zext patch generation.
+ */
+ "r1 >>= 32;"
+ "r0 = r1;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
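
All of the __msg() patterns in this file are matched against the verifier log produced at log_level 2 when the test harness loads the program. To reproduce the same "frame0: regs=... stack=... before ..." backtracking lines outside the harness, a minimal userspace sketch (hypothetical helper, not part of the patch) could be:

#include <stdio.h>
#include <bpf/libbpf.h>

static char log_buf[1 << 20];

/* Load one program from an already-opened object with a verbose verifier log. */
static int load_with_verbose_log(struct bpf_object *obj, const char *prog_name)
{
	struct bpf_program *prog = bpf_object__find_program_by_name(obj, prog_name);

	if (!prog)
		return -1;
	bpf_program__set_log_buf(prog, log_buf, sizeof(log_buf));
	bpf_program__set_log_level(prog, 2);	/* instruction-level log, as used by __msg() */
	if (bpf_object__load(obj))
		return -1;
	puts(log_buf);
	return 0;
}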
diff --git a/tools/testing/selftests/bpf/progs/verifier_sdiv.c b/tools/testing/selftests/bpf/progs/verifier_sdiv.c
new file mode 100644
index 000000000000..148d2299e5b4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_sdiv.c
@@ -0,0 +1,1224 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <limits.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+ defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 1")
+__success __success_unpriv __retval(-20)
+__naked void sdiv32_non_zero_imm_1(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w0 s/= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 2")
+__success __success_unpriv __retval(-20)
+__naked void sdiv32_non_zero_imm_2(void)
+{
+ asm volatile (" \
+ w0 = 41; \
+ w0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 3")
+__success __success_unpriv __retval(20)
+__naked void sdiv32_non_zero_imm_3(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 4")
+__success __success_unpriv __retval(-21)
+__naked void sdiv32_non_zero_imm_4(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w0 s/= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 5")
+__success __success_unpriv __retval(-21)
+__naked void sdiv32_non_zero_imm_5(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 6")
+__success __success_unpriv __retval(21)
+__naked void sdiv32_non_zero_imm_6(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 7")
+__success __success_unpriv __retval(21)
+__naked void sdiv32_non_zero_imm_7(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w0 s/= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 8")
+__success __success_unpriv __retval(20)
+__naked void sdiv32_non_zero_imm_8(void)
+{
+ asm volatile (" \
+ w0 = 41; \
+ w0 s/= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 1")
+__success __success_unpriv __retval(-20)
+__naked void sdiv32_non_zero_reg_1(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w1 = 2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 2")
+__success __success_unpriv __retval(-20)
+__naked void sdiv32_non_zero_reg_2(void)
+{
+ asm volatile (" \
+ w0 = 41; \
+ w1 = -2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 3")
+__success __success_unpriv __retval(20)
+__naked void sdiv32_non_zero_reg_3(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w1 = -2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 4")
+__success __success_unpriv __retval(-21)
+__naked void sdiv32_non_zero_reg_4(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w1 = 2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 5")
+__success __success_unpriv __retval(-21)
+__naked void sdiv32_non_zero_reg_5(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = -2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 6")
+__success __success_unpriv __retval(21)
+__naked void sdiv32_non_zero_reg_6(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w1 = -2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 7")
+__success __success_unpriv __retval(21)
+__naked void sdiv32_non_zero_reg_7(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = 2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 8")
+__success __success_unpriv __retval(20)
+__naked void sdiv32_non_zero_reg_8(void)
+{
+ asm volatile (" \
+ w0 = 41; \
+ w1 = 2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero imm divisor, check 1")
+__success __success_unpriv __retval(-20)
+__naked void sdiv64_non_zero_imm_1(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r0 s/= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero imm divisor, check 2")
+__success __success_unpriv __retval(-20)
+__naked void sdiv64_non_zero_imm_2(void)
+{
+ asm volatile (" \
+ r0 = 41; \
+ r0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero imm divisor, check 3")
+__success __success_unpriv __retval(20)
+__naked void sdiv64_non_zero_imm_3(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero imm divisor, check 4")
+__success __success_unpriv __retval(-21)
+__naked void sdiv64_non_zero_imm_4(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r0 s/= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero imm divisor, check 5")
+__success __success_unpriv __retval(-21)
+__naked void sdiv64_non_zero_imm_5(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero imm divisor, check 6")
+__success __success_unpriv __retval(21)
+__naked void sdiv64_non_zero_imm_6(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero reg divisor, check 1")
+__success __success_unpriv __retval(-20)
+__naked void sdiv64_non_zero_reg_1(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r1 = 2; \
+ r0 s/= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero reg divisor, check 2")
+__success __success_unpriv __retval(-20)
+__naked void sdiv64_non_zero_reg_2(void)
+{
+ asm volatile (" \
+ r0 = 41; \
+ r1 = -2; \
+ r0 s/= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero reg divisor, check 3")
+__success __success_unpriv __retval(20)
+__naked void sdiv64_non_zero_reg_3(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r1 = -2; \
+ r0 s/= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero reg divisor, check 4")
+__success __success_unpriv __retval(-21)
+__naked void sdiv64_non_zero_reg_4(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r1 = 2; \
+ r0 s/= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero reg divisor, check 5")
+__success __success_unpriv __retval(-21)
+__naked void sdiv64_non_zero_reg_5(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r1 = -2; \
+ r0 s/= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero reg divisor, check 6")
+__success __success_unpriv __retval(21)
+__naked void sdiv64_non_zero_reg_6(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r1 = -2; \
+ r0 s/= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero imm divisor, check 1")
+__success __success_unpriv __retval(-1)
+__naked void smod32_non_zero_imm_1(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w0 s%%= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero imm divisor, check 2")
+__success __success_unpriv __retval(1)
+__naked void smod32_non_zero_imm_2(void)
+{
+ asm volatile (" \
+ w0 = 41; \
+ w0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero imm divisor, check 3")
+__success __success_unpriv __retval(-1)
+__naked void smod32_non_zero_imm_3(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero imm divisor, check 4")
+__success __success_unpriv __retval(0)
+__naked void smod32_non_zero_imm_4(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w0 s%%= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero imm divisor, check 5")
+__success __success_unpriv __retval(0)
+__naked void smod32_non_zero_imm_5(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero imm divisor, check 6")
+__success __success_unpriv __retval(0)
+__naked void smod32_non_zero_imm_6(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero reg divisor, check 1")
+__success __success_unpriv __retval(-1)
+__naked void smod32_non_zero_reg_1(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w1 = 2; \
+ w0 s%%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero reg divisor, check 2")
+__success __success_unpriv __retval(1)
+__naked void smod32_non_zero_reg_2(void)
+{
+ asm volatile (" \
+ w0 = 41; \
+ w1 = -2; \
+ w0 s%%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero reg divisor, check 3")
+__success __success_unpriv __retval(-1)
+__naked void smod32_non_zero_reg_3(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w1 = -2; \
+ w0 s%%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero reg divisor, check 4")
+__success __success_unpriv __retval(0)
+__naked void smod32_non_zero_reg_4(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w1 = 2; \
+ w0 s%%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero reg divisor, check 5")
+__success __success_unpriv __retval(0)
+__naked void smod32_non_zero_reg_5(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = -2; \
+ w0 s%%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero reg divisor, check 6")
+__success __success_unpriv __retval(0)
+__naked void smod32_non_zero_reg_6(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w1 = -2; \
+ w0 s%%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 1")
+__success __success_unpriv __retval(-1)
+__naked void smod64_non_zero_imm_1(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r0 s%%= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 2")
+__success __success_unpriv __retval(1)
+__naked void smod64_non_zero_imm_2(void)
+{
+ asm volatile (" \
+ r0 = 41; \
+ r0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 3")
+__success __success_unpriv __retval(-1)
+__naked void smod64_non_zero_imm_3(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 4")
+__success __success_unpriv __retval(0)
+__naked void smod64_non_zero_imm_4(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r0 s%%= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 5")
+__success __success_unpriv __retval(0)
+__naked void smod64_non_zero_imm_5(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 6")
+__success __success_unpriv __retval(0)
+__naked void smod64_non_zero_imm_6(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 7")
+__success __success_unpriv __retval(0)
+__naked void smod64_non_zero_imm_7(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r0 s%%= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 8")
+__success __success_unpriv __retval(1)
+__naked void smod64_non_zero_imm_8(void)
+{
+ asm volatile (" \
+ r0 = 41; \
+ r0 s%%= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 1")
+__success __success_unpriv __retval(-1)
+__naked void smod64_non_zero_reg_1(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r1 = 2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 2")
+__success __success_unpriv __retval(1)
+__naked void smod64_non_zero_reg_2(void)
+{
+ asm volatile (" \
+ r0 = 41; \
+ r1 = -2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 3")
+__success __success_unpriv __retval(-1)
+__naked void smod64_non_zero_reg_3(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r1 = -2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 4")
+__success __success_unpriv __retval(0)
+__naked void smod64_non_zero_reg_4(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r1 = 2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 5")
+__success __success_unpriv __retval(0)
+__naked void smod64_non_zero_reg_5(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r1 = -2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 6")
+__success __success_unpriv __retval(0)
+__naked void smod64_non_zero_reg_6(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r1 = -2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 7")
+__success __success_unpriv __retval(0)
+__naked void smod64_non_zero_reg_7(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r1 = 2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 8")
+__success __success_unpriv __retval(1)
+__naked void smod64_non_zero_reg_8(void)
+{
+ asm volatile (" \
+ r0 = 41; \
+ r1 = 2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, zero divisor")
+__success __success_unpriv __retval(0)
+__naked void sdiv32_zero_divisor(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = 0; \
+ w2 = -1; \
+ w2 s/= w1; \
+ w0 = w2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, zero divisor")
+__success __success_unpriv __retval(0)
+__naked void sdiv64_zero_divisor(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r1 = 0; \
+ r2 = -1; \
+ r2 s/= r1; \
+ r0 = r2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, zero divisor")
+__success __success_unpriv __retval(-1)
+__naked void smod32_zero_divisor(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = 0; \
+ w2 = -1; \
+ w2 s%%= w1; \
+ w0 = w2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, zero divisor")
+__success __success_unpriv __retval(-1)
+__naked void smod64_zero_divisor(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r1 = 0; \
+ r2 = -1; \
+ r2 s%%= r1; \
+ r0 = r2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, overflow r/r, LLONG_MIN/-1")
+__success __retval(1)
+__arch_x86_64
+__xlated("0: r2 = 0x8000000000000000")
+__xlated("2: r3 = -1")
+__xlated("3: r4 = r2")
+__xlated("4: r11 = r3")
+__xlated("5: r11 += 1")
+__xlated("6: if r11 > 0x1 goto pc+4")
+__xlated("7: if r11 == 0x0 goto pc+1")
+__xlated("8: r2 = 0")
+__xlated("9: r2 = -r2")
+__xlated("10: goto pc+1")
+__xlated("11: r2 s/= r3")
+__xlated("12: r0 = 0")
+__xlated("13: if r2 != r4 goto pc+1")
+__xlated("14: r0 = 1")
+__xlated("15: exit")
+__naked void sdiv64_overflow_rr(void)
+{
+ asm volatile (" \
+ r2 = %[llong_min] ll; \
+ r3 = -1; \
+ r4 = r2; \
+ r2 s/= r3; \
+ r0 = 0; \
+ if r2 != r4 goto +1; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_const(llong_min, LLONG_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, r/r, small_val/-1")
+__success __retval(-5)
+__arch_x86_64
+__xlated("0: r2 = 5")
+__xlated("1: r3 = -1")
+__xlated("2: r11 = r3")
+__xlated("3: r11 += 1")
+__xlated("4: if r11 > 0x1 goto pc+4")
+__xlated("5: if r11 == 0x0 goto pc+1")
+__xlated("6: r2 = 0")
+__xlated("7: r2 = -r2")
+__xlated("8: goto pc+1")
+__xlated("9: r2 s/= r3")
+__xlated("10: r0 = r2")
+__xlated("11: exit")
+__naked void sdiv64_rr_divisor_neg_1(void)
+{
+ asm volatile (" \
+ r2 = 5; \
+ r3 = -1; \
+ r2 s/= r3; \
+ r0 = r2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, overflow r/i, LLONG_MIN/-1")
+__success __retval(1)
+__arch_x86_64
+__xlated("0: r2 = 0x8000000000000000")
+__xlated("2: r4 = r2")
+__xlated("3: r2 = -r2")
+__xlated("4: r0 = 0")
+__xlated("5: if r2 != r4 goto pc+1")
+__xlated("6: r0 = 1")
+__xlated("7: exit")
+__naked void sdiv64_overflow_ri(void)
+{
+ asm volatile (" \
+ r2 = %[llong_min] ll; \
+ r4 = r2; \
+ r2 s/= -1; \
+ r0 = 0; \
+ if r2 != r4 goto +1; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_const(llong_min, LLONG_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, r/i, small_val/-1")
+__success __retval(-5)
+__arch_x86_64
+__xlated("0: r2 = 5")
+__xlated("1: r4 = r2")
+__xlated("2: r2 = -r2")
+__xlated("3: r0 = r2")
+__xlated("4: exit")
+__naked void sdiv64_ri_divisor_neg_1(void)
+{
+ asm volatile (" \
+ r2 = 5; \
+ r4 = r2; \
+ r2 s/= -1; \
+ r0 = r2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, overflow r/r, INT_MIN/-1")
+__success __retval(1)
+__arch_x86_64
+__xlated("0: w2 = -2147483648")
+__xlated("1: w3 = -1")
+__xlated("2: w4 = w2")
+__xlated("3: r11 = r3")
+__xlated("4: w11 += 1")
+__xlated("5: if w11 > 0x1 goto pc+4")
+__xlated("6: if w11 == 0x0 goto pc+1")
+__xlated("7: w2 = 0")
+__xlated("8: w2 = -w2")
+__xlated("9: goto pc+1")
+__xlated("10: w2 s/= w3")
+__xlated("11: r0 = 0")
+__xlated("12: if w2 != w4 goto pc+1")
+__xlated("13: r0 = 1")
+__xlated("14: exit")
+__naked void sdiv32_overflow_rr(void)
+{
+ asm volatile (" \
+ w2 = %[int_min]; \
+ w3 = -1; \
+ w4 = w2; \
+ w2 s/= w3; \
+ r0 = 0; \
+ if w2 != w4 goto +1; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_const(int_min, INT_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, r/r, small_val/-1")
+__success __retval(5)
+__arch_x86_64
+__xlated("0: w2 = -5")
+__xlated("1: w3 = -1")
+__xlated("2: w4 = w2")
+__xlated("3: r11 = r3")
+__xlated("4: w11 += 1")
+__xlated("5: if w11 > 0x1 goto pc+4")
+__xlated("6: if w11 == 0x0 goto pc+1")
+__xlated("7: w2 = 0")
+__xlated("8: w2 = -w2")
+__xlated("9: goto pc+1")
+__xlated("10: w2 s/= w3")
+__xlated("11: w0 = w2")
+__xlated("12: exit")
+__naked void sdiv32_rr_divisor_neg_1(void)
+{
+ asm volatile (" \
+ w2 = -5; \
+ w3 = -1; \
+ w4 = w2; \
+ w2 s/= w3; \
+ w0 = w2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, overflow r/i, INT_MIN/-1")
+__success __retval(1)
+__arch_x86_64
+__xlated("0: w2 = -2147483648")
+__xlated("1: w4 = w2")
+__xlated("2: w2 = -w2")
+__xlated("3: r0 = 0")
+__xlated("4: if w2 != w4 goto pc+1")
+__xlated("5: r0 = 1")
+__xlated("6: exit")
+__naked void sdiv32_overflow_ri(void)
+{
+ asm volatile (" \
+ w2 = %[int_min]; \
+ w4 = w2; \
+ w2 s/= -1; \
+ r0 = 0; \
+ if w2 != w4 goto +1; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_const(int_min, INT_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, r/i, small_val/-1")
+__success __retval(-5)
+__arch_x86_64
+__xlated("0: w2 = 5")
+__xlated("1: w4 = w2")
+__xlated("2: w2 = -w2")
+__xlated("3: w0 = w2")
+__xlated("4: exit")
+__naked void sdiv32_ri_divisor_neg_1(void)
+{
+ asm volatile (" \
+ w2 = 5; \
+ w4 = w2; \
+ w2 s/= -1; \
+ w0 = w2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, overflow r/r, LLONG_MIN/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: r2 = 0x8000000000000000")
+__xlated("2: r3 = -1")
+__xlated("3: r4 = r2")
+__xlated("4: r11 = r3")
+__xlated("5: r11 += 1")
+__xlated("6: if r11 > 0x1 goto pc+3")
+__xlated("7: if r11 == 0x1 goto pc+3")
+__xlated("8: w2 = 0")
+__xlated("9: goto pc+1")
+__xlated("10: r2 s%= r3")
+__xlated("11: r0 = r2")
+__xlated("12: exit")
+__naked void smod64_overflow_rr(void)
+{
+ asm volatile (" \
+ r2 = %[llong_min] ll; \
+ r3 = -1; \
+ r4 = r2; \
+ r2 s%%= r3; \
+ r0 = r2; \
+ exit; \
+" :
+ : __imm_const(llong_min, LLONG_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, r/r, small_val/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: r2 = 5")
+__xlated("1: r3 = -1")
+__xlated("2: r4 = r2")
+__xlated("3: r11 = r3")
+__xlated("4: r11 += 1")
+__xlated("5: if r11 > 0x1 goto pc+3")
+__xlated("6: if r11 == 0x1 goto pc+3")
+__xlated("7: w2 = 0")
+__xlated("8: goto pc+1")
+__xlated("9: r2 s%= r3")
+__xlated("10: r0 = r2")
+__xlated("11: exit")
+__naked void smod64_rr_divisor_neg_1(void)
+{
+ asm volatile (" \
+ r2 = 5; \
+ r3 = -1; \
+ r4 = r2; \
+ r2 s%%= r3; \
+ r0 = r2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, overflow r/i, LLONG_MIN/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: r2 = 0x8000000000000000")
+__xlated("2: r4 = r2")
+__xlated("3: w2 = 0")
+__xlated("4: r0 = r2")
+__xlated("5: exit")
+__naked void smod64_overflow_ri(void)
+{
+ asm volatile (" \
+ r2 = %[llong_min] ll; \
+ r4 = r2; \
+ r2 s%%= -1; \
+ r0 = r2; \
+ exit; \
+" :
+ : __imm_const(llong_min, LLONG_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, r/i, small_val/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: r2 = 5")
+__xlated("1: r4 = r2")
+__xlated("2: w2 = 0")
+__xlated("3: r0 = r2")
+__xlated("4: exit")
+__naked void smod64_ri_divisor_neg_1(void)
+{
+ asm volatile (" \
+ r2 = 5; \
+ r4 = r2; \
+ r2 s%%= -1; \
+ r0 = r2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, overflow r/r, INT_MIN/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: w2 = -2147483648")
+__xlated("1: w3 = -1")
+__xlated("2: w4 = w2")
+__xlated("3: r11 = r3")
+__xlated("4: w11 += 1")
+__xlated("5: if w11 > 0x1 goto pc+3")
+__xlated("6: if w11 == 0x1 goto pc+4")
+__xlated("7: w2 = 0")
+__xlated("8: goto pc+1")
+__xlated("9: w2 s%= w3")
+__xlated("10: goto pc+1")
+__xlated("11: w2 = w2")
+__xlated("12: r0 = r2")
+__xlated("13: exit")
+__naked void smod32_overflow_rr(void)
+{
+ asm volatile (" \
+ w2 = %[int_min]; \
+ w3 = -1; \
+ w4 = w2; \
+ w2 s%%= w3; \
+ r0 = r2; \
+ exit; \
+" :
+ : __imm_const(int_min, INT_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, r/r, small_val/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: w2 = -5")
+__xlated("1: w3 = -1")
+__xlated("2: w4 = w2")
+__xlated("3: r11 = r3")
+__xlated("4: w11 += 1")
+__xlated("5: if w11 > 0x1 goto pc+3")
+__xlated("6: if w11 == 0x1 goto pc+4")
+__xlated("7: w2 = 0")
+__xlated("8: goto pc+1")
+__xlated("9: w2 s%= w3")
+__xlated("10: goto pc+1")
+__xlated("11: w2 = w2")
+__xlated("12: r0 = r2")
+__xlated("13: exit")
+__naked void smod32_rr_divisor_neg_1(void)
+{
+ asm volatile (" \
+ w2 = -5; \
+ w3 = -1; \
+ w4 = w2; \
+ w2 s%%= w3; \
+ r0 = r2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, overflow r/i, INT_MIN/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: w2 = -2147483648")
+__xlated("1: w4 = w2")
+__xlated("2: w2 = 0")
+__xlated("3: r0 = r2")
+__xlated("4: exit")
+__naked void smod32_overflow_ri(void)
+{
+ asm volatile (" \
+ w2 = %[int_min]; \
+ w4 = w2; \
+ w2 s%%= -1; \
+ r0 = r2; \
+ exit; \
+" :
+ : __imm_const(int_min, INT_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, r/i, small_val/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: w2 = 5")
+__xlated("1: w4 = w2")
+__xlated("2: w2 = 0")
+__xlated("3: w0 = w2")
+__xlated("4: exit")
+__naked void smod32_ri_divisor_neg_1(void)
+{
+ asm volatile (" \
+ w2 = 5; \
+ w4 = w2; \
+ w2 s%%= -1; \
+ w0 = w2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+#else
+
+SEC("socket")
+__description("cpuv4 is not supported by compiler or jit, use a dummy test")
+__success
+int dummy_test(void)
+{
+ return 0;
+}
+
+#endif
+
+char _license[] SEC("license") = "GPL";
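
The __retval() values and __xlated() sequences above pin down the cpuv4 semantics for signed division and modulo: results truncate toward zero, SDIV by zero yields 0 while SMOD by zero yields the dividend, and the INT_MIN/LLONG_MIN divided by -1 overflow case yields the dividend unchanged while any value modulo -1 yields 0. A plain C reference model of the 64-bit rules (illustration only, mirroring the expected return values above) is:

/* Negation goes through unsigned arithmetic so that LLONG_MIN / -1 wraps
 * back to LLONG_MIN instead of invoking undefined behaviour in C.
 */
static long long bpf_sdiv64(long long a, long long b)
{
	if (b == 0)
		return 0;				/* SDIV by zero -> 0 */
	if (b == -1)
		return (long long)(0ULL - (unsigned long long)a);
	return a / b;					/* C '/' truncates toward zero */
}

static long long bpf_smod64(long long a, long long b)
{
	if (b == 0)
		return a;				/* SMOD by zero -> dividend */
	if (b == -1)
		return 0;				/* covers LLONG_MIN % -1 as well */
	return a % b;
}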
diff --git a/tools/testing/selftests/bpf/progs/verifier_search_pruning.c b/tools/testing/selftests/bpf/progs/verifier_search_pruning.c
new file mode 100644
index 000000000000..f40e57251e94
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_search_pruning.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/search_pruning.c */
+
+#include <linux/bpf.h>
+#include <../../../include/linux/filter.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct test_val);
+} map_hash_48b SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
+SEC("socket")
+__description("pointer/scalar confusion in state equality check (way 1)")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value")
+__retval(POINTER_VALUE)
+__naked void state_equality_check_way_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r0 = *(u64*)(r0 + 0); \
+ goto l1_%=; \
+l0_%=: r0 = r10; \
+l1_%=: goto l2_%=; \
+l2_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("pointer/scalar confusion in state equality check (way 2)")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value")
+__retval(POINTER_VALUE)
+__naked void state_equality_check_way_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ r0 = r10; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r0 + 0); \
+l1_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
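+/* Both "if r2 >= 0" checks below are unsigned compares and therefore
+ * always taken, so neither "r0 = 0" executes and r0 is still
+ * uninitialized when it is read at exit.
+ */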
+SEC("lwt_in")
+__description("liveness pruning and write screening")
+__failure __msg("R0 !read_ok")
+__naked void liveness_pruning_and_write_screening(void)
+{
+ asm volatile (" \
+ /* Get an unknown value */ \
+ r2 = *(u32*)(r1 + 0); \
+ /* branch conditions teach us nothing about R2 */\
+ if r2 >= 0 goto l0_%=; \
+ r0 = 0; \
+l0_%=: if r2 >= 0 goto l1_%=; \
+ r0 = 0; \
+l1_%=: exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("varlen_map_value_access pruning")
+__failure __msg("R0 unbounded memory access")
+__failure_unpriv __msg_unpriv("R0 leaks addr")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void varlen_map_value_access_pruning(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r0 + 0); \
+ w2 = %[max_entries]; \
+ if r2 s> r1 goto l1_%=; \
+ w1 = 0; \
+l1_%=: w1 <<= 2; \
+ r0 += r1; \
+ goto l2_%=; \
+l2_%=: r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(max_entries, MAX_ENTRIES),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
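+/* Both spill branches must be verified: when the value read back into r5
+ * is non-zero, execution reaches a store through r6 = 0, which has to be
+ * rejected as an invalid memory access.
+ */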
+SEC("tracepoint")
+__description("search pruning: all branches should be verified (nop operation)")
+__failure __msg("R6 invalid mem access 'scalar'")
+__naked void should_be_verified_nop_operation(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r3 = *(u64*)(r0 + 0); \
+ if r3 == 0xbeef goto l1_%=; \
+ r4 = 0; \
+ goto l2_%=; \
+l1_%=: r4 = 1; \
+l2_%=: *(u64*)(r10 - 16) = r4; \
+ call %[bpf_ktime_get_ns]; \
+ r5 = *(u64*)(r10 - 16); \
+ if r5 == 0 goto l0_%=; \
+ r6 = 0; \
+ r1 = 0xdead; \
+ *(u64*)(r6 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("search pruning: all branches should be verified (invalid stack access)")
+/* in privileged mode reads from uninitialized stack locations are permitted */
+__success __failure_unpriv
+__msg_unpriv("invalid read from stack off -16+0 size 8")
+__retval(0)
+__naked void be_verified_invalid_stack_access(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r3 = *(u64*)(r0 + 0); \
+ r4 = 0; \
+ if r3 == 0xbeef goto l1_%=; \
+ *(u64*)(r10 - 16) = r4; \
+ goto l2_%=; \
+l1_%=: *(u64*)(r10 - 24) = r4; \
+l2_%=: call %[bpf_ktime_get_ns]; \
+ r5 = *(u64*)(r10 - 16); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
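+/* r6 is either 32 or 4 and goes through a 32-bit spill/fill before being
+ * added to a pointer into an 8-byte map value, so precision must be
+ * tracked through the spill to catch the out-of-bounds case (r6 = 32).
+ */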
+SEC("tracepoint")
+__description("precision tracking for u32 spill/fill")
+__failure __msg("R0 min value is outside of the allowed memory range")
+__naked void tracking_for_u32_spill_fill(void)
+{
+ asm volatile (" \
+ r7 = r1; \
+ call %[bpf_get_prandom_u32]; \
+ w6 = 32; \
+ if r0 == 0 goto l0_%=; \
+ w6 = 4; \
+l0_%=: /* Additional insns to introduce a pruning point. */\
+ call %[bpf_get_prandom_u32]; \
+ r3 = 0; \
+ r3 = 0; \
+ if r0 == 0 goto l1_%=; \
+ r3 = 0; \
+l1_%=: /* u32 spill/fill */ \
+ *(u32*)(r10 - 8) = r6; \
+ r8 = *(u32*)(r10 - 8); \
+ /* out-of-bound map value access for r6=32 */ \
+ r1 = 0; \
+ *(u64*)(r10 - 16) = r1; \
+ r2 = r10; \
+ r2 += -16; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ r0 += r8; \
+ r1 = *(u32*)(r0 + 0); \
+l2_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("tracepoint")
+__description("precision tracking for u32 spills, u64 fill")
+__failure __msg("div by zero")
+__naked void for_u32_spills_u64_fill(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r6 = r0; \
+ w7 = 0xffffffff; \
+ /* Additional insns to introduce a pruning point. */\
+ r3 = 1; \
+ r3 = 1; \
+ r3 = 1; \
+ r3 = 1; \
+ call %[bpf_get_prandom_u32]; \
+ if r0 == 0 goto l0_%=; \
+ r3 = 1; \
+l0_%=: w3 /= 0; \
+ /* u32 spills, u64 fill */ \
+ *(u32*)(r10 - 4) = r6; \
+ *(u32*)(r10 - 8) = r7; \
+ r8 = *(u64*)(r10 - 8); \
+ /* if r8 != X goto pc+1 r8 known in fallthrough branch */\
+ if r8 != 0xffffffff goto l1_%=; \
+ r3 = 1; \
+l1_%=: /* if r8 == X goto pc+1 condition always true on first\
+ * traversal, so starts backtracking to mark r8 as requiring\
+ * precision. r7 marked as needing precision. r6 not marked\
+ * since it's not tracked. \
+ */ \
+ if r8 == 0xffffffff goto l2_%=; \
+ /* fails if r8 correctly marked unknown after fill. */\
+ w3 /= 0; \
+l2_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("allocated_stack")
+__success __msg("processed 15 insns")
+__success_unpriv __msg_unpriv("") __log_level(1) __retval(0)
+__naked void allocated_stack(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ call %[bpf_get_prandom_u32]; \
+ r7 = r0; \
+ if r0 == 0 goto l0_%=; \
+ r0 = 0; \
+ *(u64*)(r10 - 8) = r6; \
+ r6 = *(u64*)(r10 - 8); \
+ *(u8*)(r10 - 9) = r7; \
+ r7 = *(u8*)(r10 - 9); \
+l0_%=: if r0 != 0 goto l1_%=; \
+l1_%=: if r0 != 0 goto l2_%=; \
+l2_%=: if r0 != 0 goto l3_%=; \
+l3_%=: if r0 != 0 goto l4_%=; \
+l4_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/* The test performs a conditional 64-bit write to a stack location
+ * fp[-8]; this is followed by an unconditional 8-bit write to fp[-8],
+ * and then data is read from fp[-8]. This sequence is unsafe.
+ *
+ * The test would be mistakenly marked as safe without dst register parent
+ * preservation in the verifier.c:copy_register_state() function.
+ *
+ * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the
+ * checkpoint state after conditional 64-bit assignment.
+ */
+
+SEC("socket")
+__description("write tracking and register parent chain bug")
+/* in privileged mode reads from uninitialized stack locations are permitted */
+__success __failure_unpriv
+__msg_unpriv("invalid read from stack off -8+1 size 8")
+__retval(0) __flag(BPF_F_TEST_STATE_FREQ)
+__naked void and_register_parent_chain_bug(void)
+{
+ asm volatile (" \
+ /* r6 = ktime_get_ns() */ \
+ call %[bpf_ktime_get_ns]; \
+ r6 = r0; \
+ /* r0 = ktime_get_ns() */ \
+ call %[bpf_ktime_get_ns]; \
+ /* if r0 > r6 goto +1 */ \
+ if r0 > r6 goto l0_%=; \
+ /* *(u64 *)(r10 - 8) = 0xdeadbeef */ \
+ r0 = 0xdeadbeef; \
+ *(u64*)(r10 - 8) = r0; \
+l0_%=: r1 = 42; \
+ *(u8*)(r10 - 8) = r1; \
+ r2 = *(u64*)(r10 - 8); \
+ /* exit(0) */ \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Without a checkpoint forcibly inserted at the back-edge of a loop, this
+ * test would take a very long time to verify.
+ */
+SEC("kprobe")
+__failure __log_level(4)
+__msg("BPF program is too large.")
+__naked void short_loop1(void)
+{
+ asm volatile (
+ " r7 = *(u16 *)(r1 +0);"
+ "1: r7 += 0x1ab064b9;"
+ " .8byte %[jset];" /* same as 'if r7 & 0x702000 goto 1b;' */
+ " r7 &= 0x1ee60e;"
+ " r7 += r1;"
+ " if r7 s> 0x37d2 goto +0;"
+ " r0 = 0;"
+ " exit;"
+ :
+ : __imm_insn(jset, BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x702000, -2))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_sock.c b/tools/testing/selftests/bpf/progs/verifier_sock.c
new file mode 100644
index 000000000000..0d5e56dffabb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_sock.c
@@ -0,0 +1,1096 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/sock.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+#define offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
+
+struct {
+ __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} map_reuseport_array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKHASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} map_sockhash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} map_sockmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_XSKMAP);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} map_xskmap SEC(".maps");
+
+struct val {
+ int cnt;
+ struct bpf_spin_lock l;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(max_entries, 0);
+ __type(key, int);
+ __type(value, struct val);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} sk_storage_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+SEC("cgroup/skb")
+__description("skb->sk: no NULL check")
+__failure __msg("invalid mem access 'sock_common_or_null'")
+__failure_unpriv
+__naked void skb_sk_no_null_check(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ r0 = *(u32*)(r1 + 0); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("skb->sk: sk->family [non fullsock field]")
+__success __success_unpriv __retval(0)
+__naked void sk_family_non_fullsock_field_1(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_family]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("skb->sk: sk->type [fullsock field]")
+__failure __msg("invalid sock_common access")
+__failure_unpriv
+__naked void sk_sk_type_fullsock_field_1(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_type]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("bpf_sk_fullsock(skb->sk): no !skb->sk check")
+__failure __msg("type=sock_common_or_null expected=sock_common")
+__failure_unpriv
+__naked void sk_no_skb_sk_check_1(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ call %[bpf_sk_fullsock]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("sk_fullsock(skb->sk): no NULL check on ret")
+__failure __msg("invalid mem access 'sock_or_null'")
+__failure_unpriv
+__naked void no_null_check_on_ret_1(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ r0 = *(u32*)(r0 + %[bpf_sock_type]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("sk_fullsock(skb->sk): sk->type [fullsock field]")
+__success __success_unpriv __retval(0)
+__naked void sk_sk_type_fullsock_field_2(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_type]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("sk_fullsock(skb->sk): sk->family [non fullsock field]")
+__success __success_unpriv __retval(0)
+__naked void sk_family_non_fullsock_field_2(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ exit; \
+l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_family]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("sk_fullsock(skb->sk): sk->state [narrow load]")
+__success __success_unpriv __retval(0)
+__naked void sk_sk_state_narrow_load(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_state]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_state, offsetof(struct bpf_sock, state))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)")
+__success __success_unpriv __retval(0)
+__naked void port_word_load_backward_compatibility(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_dst_port]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("sk_fullsock(skb->sk): sk->dst_port [half load]")
+__success __success_unpriv __retval(0)
+__naked void sk_dst_port_half_load(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)")
+__failure __msg("invalid sock access")
+__failure_unpriv
+__naked void dst_port_half_load_invalid_1(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = *(u16*)(r0 + %[__imm_0]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("sk_fullsock(skb->sk): sk->dst_port [byte load]")
+__success __success_unpriv __retval(0)
+__naked void sk_dst_port_byte_load(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r2 = *(u8*)(r0 + %[bpf_sock_dst_port]); \
+ r2 = *(u8*)(r0 + %[__imm_0]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)")
+__failure __msg("invalid sock access")
+__failure_unpriv
+__naked void dst_port_byte_load_invalid(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)")
+__failure __msg("invalid sock access")
+__failure_unpriv
+__naked void dst_port_half_load_invalid_2(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]")
+__success __success_unpriv __retval(0)
+__naked void dst_ip6_load_2nd_byte(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("sk_fullsock(skb->sk): sk->type [narrow load]")
+__success __success_unpriv __retval(0)
+__naked void sk_sk_type_narrow_load(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_type]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("sk_fullsock(skb->sk): sk->protocol [narrow load]")
+__success __success_unpriv __retval(0)
+__naked void sk_sk_protocol_narrow_load(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_protocol]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("sk_fullsock(skb->sk): beyond last field")
+__failure __msg("invalid sock access")
+__failure_unpriv
+__naked void skb_sk_beyond_last_field_1(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]);\
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("bpf_tcp_sock(skb->sk): no !skb->sk check")
+__failure __msg("type=sock_common_or_null expected=sock_common")
+__failure_unpriv
+__naked void sk_no_skb_sk_check_2(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ call %[bpf_tcp_sock]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_tcp_sock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("bpf_tcp_sock(skb->sk): no NULL check on ret")
+__failure __msg("invalid mem access 'tcp_sock_or_null'")
+__failure_unpriv
+__naked void no_null_check_on_ret_2(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_tcp_sock]; \
+ r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_tcp_sock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("bpf_tcp_sock(skb->sk): tp->snd_cwnd")
+__success __success_unpriv __retval(0)
+__naked void skb_sk_tp_snd_cwnd_1(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_tcp_sock]; \
+ if r0 != 0 goto l1_%=; \
+ exit; \
+l1_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_tcp_sock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("bpf_tcp_sock(skb->sk): tp->bytes_acked")
+__success __success_unpriv __retval(0)
+__naked void skb_sk_tp_bytes_acked(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_tcp_sock]; \
+ if r0 != 0 goto l1_%=; \
+ exit; \
+l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_tcp_sock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("bpf_tcp_sock(skb->sk): beyond last field")
+__failure __msg("invalid tcp_sock access")
+__failure_unpriv
+__naked void skb_sk_beyond_last_field_2(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_tcp_sock]; \
+ if r0 != 0 goto l1_%=; \
+ exit; \
+l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]);\
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_tcp_sock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked))
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd")
+__success __success_unpriv __retval(0)
+__naked void skb_sk_tp_snd_cwnd_2(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ exit; \
+l1_%=: r1 = r0; \
+ call %[bpf_tcp_sock]; \
+ if r0 != 0 goto l2_%=; \
+ exit; \
+l2_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm(bpf_tcp_sock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
+ __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("bpf_sk_release(skb->sk)")
+__failure __msg("R1 must be referenced when passed to release function")
+__naked void bpf_sk_release_skb_sk(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 == 0 goto l0_%=; \
+ call %[bpf_sk_release]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_release),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("bpf_sk_release(bpf_sk_fullsock(skb->sk))")
+__failure __msg("R1 must be referenced when passed to release function")
+__naked void bpf_sk_fullsock_skb_sk(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ exit; \
+l1_%=: r1 = r0; \
+ call %[bpf_sk_release]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm(bpf_sk_release),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("bpf_sk_release(bpf_tcp_sock(skb->sk))")
+__failure __msg("R1 must be referenced when passed to release function")
+__naked void bpf_tcp_sock_skb_sk(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_tcp_sock]; \
+ if r0 != 0 goto l1_%=; \
+ exit; \
+l1_%=: r1 = r0; \
+ call %[bpf_sk_release]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_sk_release),
+ __imm(bpf_tcp_sock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL")
+__success __retval(0)
+__naked void sk_null_0_value_null(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r4 = 0; \
+ r3 = 0; \
+ r2 = r0; \
+ r1 = %[sk_storage_map] ll; \
+ call %[bpf_sk_storage_get]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm(bpf_sk_storage_get),
+ __imm_addr(sk_storage_map),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("sk_storage_get(map, skb->sk, 1, 1): value == 1")
+__failure __msg("R3 type=scalar expected=fp")
+__naked void sk_1_1_value_1(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r4 = 1; \
+ r3 = 1; \
+ r2 = r0; \
+ r1 = %[sk_storage_map] ll; \
+ call %[bpf_sk_storage_get]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm(bpf_sk_storage_get),
+ __imm_addr(sk_storage_map),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value")
+__success __retval(0)
+__naked void stack_value_1_stack_value(void)
+{
+ asm volatile (" \
+ r2 = 0; \
+ *(u64*)(r10 - 8) = r2; \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: call %[bpf_sk_fullsock]; \
+ if r0 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r4 = 1; \
+ r3 = r10; \
+ r3 += -8; \
+ r2 = r0; \
+ r1 = %[sk_storage_map] ll; \
+ call %[bpf_sk_storage_get]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_fullsock),
+ __imm(bpf_sk_storage_get),
+ __imm_addr(sk_storage_map),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("bpf_map_lookup_elem(smap, &key)")
+__failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem")
+__naked void map_lookup_elem_smap_key(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[sk_storage_map] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(sk_storage_map)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id")
+__success __retval(0)
+__naked void xskmap_key_xs_queue_id(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_xskmap] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_xskmap),
+ __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id))
+ : __clobber_all);
+}
+
+SEC("sk_skb")
+__description("bpf_map_lookup_elem(sockmap, &key)")
+__failure __msg("Unreleased reference id=2 alloc_insn=6")
+__naked void map_lookup_elem_sockmap_key(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_sockmap] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_sockmap)
+ : __clobber_all);
+}
+
+SEC("sk_skb")
+__description("bpf_map_lookup_elem(sockhash, &key)")
+__failure __msg("Unreleased reference id=2 alloc_insn=6")
+__naked void map_lookup_elem_sockhash_key(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_sockhash] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_sockhash)
+ : __clobber_all);
+}
+
+SEC("sk_skb")
+__description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
+__success
+__naked void field_bpf_sk_release_sk_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_sockmap] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r1 = r0; \
+ r0 = *(u32*)(r0 + %[bpf_sock_type]); \
+ call %[bpf_sk_release]; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_sk_release),
+ __imm_addr(map_sockmap),
+ __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
+ : __clobber_all);
+}
+
+SEC("sk_skb")
+__description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
+__success
+__naked void field_bpf_sk_release_sk_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_sockhash] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r1 = r0; \
+ r0 = *(u32*)(r0 + %[bpf_sock_type]); \
+ call %[bpf_sk_release]; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_sk_release),
+ __imm_addr(map_sockhash),
+ __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
+ : __clobber_all);
+}
+
+SEC("sk_reuseport")
+__description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)")
+__success
+__naked void ctx_reuseport_array_key_flags(void)
+{
+ asm volatile (" \
+ r4 = 0; \
+ r2 = 0; \
+ *(u32*)(r10 - 4) = r2; \
+ r3 = r10; \
+ r3 += -4; \
+ r2 = %[map_reuseport_array] ll; \
+ call %[bpf_sk_select_reuseport]; \
+ exit; \
+" :
+ : __imm(bpf_sk_select_reuseport),
+ __imm_addr(map_reuseport_array)
+ : __clobber_all);
+}
+
+SEC("sk_reuseport")
+__description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)")
+__success
+__naked void reuseport_ctx_sockmap_key_flags(void)
+{
+ asm volatile (" \
+ r4 = 0; \
+ r2 = 0; \
+ *(u32*)(r10 - 4) = r2; \
+ r3 = r10; \
+ r3 += -4; \
+ r2 = %[map_sockmap] ll; \
+ call %[bpf_sk_select_reuseport]; \
+ exit; \
+" :
+ : __imm(bpf_sk_select_reuseport),
+ __imm_addr(map_sockmap)
+ : __clobber_all);
+}
+
+SEC("sk_reuseport")
+__description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)")
+__success
+__naked void reuseport_ctx_sockhash_key_flags(void)
+{
+ asm volatile (" \
+ r4 = 0; \
+ r2 = 0; \
+ *(u32*)(r10 - 4) = r2; \
+ r3 = r10; \
+ r3 += -4; \
+ r2 = %[map_sockmap] ll; \
+ call %[bpf_sk_select_reuseport]; \
+ exit; \
+" :
+ : __imm(bpf_sk_select_reuseport),
+ __imm_addr(map_sockmap)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("mark null check on return value of bpf_skc_to helpers")
+__failure __msg("invalid mem access")
+__naked void of_bpf_skc_to_helpers(void)
+{
+ asm volatile (" \
+ r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
+ if r1 != 0 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: r6 = r1; \
+ call %[bpf_skc_to_tcp_sock]; \
+ r7 = r0; \
+ r1 = r6; \
+ call %[bpf_skc_to_tcp_request_sock]; \
+ r8 = r0; \
+ if r8 != 0 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = *(u8*)(r7 + 0); \
+ exit; \
+" :
+ : __imm(bpf_skc_to_tcp_request_sock),
+ __imm(bpf_skc_to_tcp_sock),
+ __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
+ : __clobber_all);
+}
+
+SEC("cgroup/post_bind4")
+__description("sk->src_ip6[0] [load 1st byte]")
+__failure __msg("invalid bpf_context access off=28 size=2")
+__naked void post_bind4_read_src_ip6(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r7 = *(u16*)(r6 + %[bpf_sock_src_ip6_0]); \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_const(bpf_sock_src_ip6_0, offsetof(struct bpf_sock, src_ip6[0]))
+ : __clobber_all);
+}
+
+SEC("cgroup/post_bind4")
+__description("sk->mark [load mark]")
+__failure __msg("invalid bpf_context access off=16 size=2")
+__naked void post_bind4_read_mark(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r7 = *(u16*)(r6 + %[bpf_sock_mark]); \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark))
+ : __clobber_all);
+}
+
+SEC("cgroup/post_bind6")
+__description("sk->src_ip4 [load src_ip4]")
+__failure __msg("invalid bpf_context access off=24 size=2")
+__naked void post_bind6_read_src_ip4(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r7 = *(u16*)(r6 + %[bpf_sock_src_ip4]); \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_const(bpf_sock_src_ip4, offsetof(struct bpf_sock, src_ip4))
+ : __clobber_all);
+}
+
+SEC("cgroup/sock_create")
+__description("sk->src_port [word load]")
+__failure __msg("invalid bpf_context access off=44 size=2")
+__naked void sock_create_read_src_port(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r7 = *(u16*)(r6 + %[bpf_sock_src_port]); \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port))
+ : __clobber_all);
+}
+
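+/* The two helpers below are non-static and __noinline, so they are
+ * verified as separate global functions rather than being inlined.
+ */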
+__noinline
+long skb_pull_data2(struct __sk_buff *sk, __u32 len)
+{
+ return bpf_skb_pull_data(sk, len);
+}
+
+__noinline
+long skb_pull_data1(struct __sk_buff *sk, __u32 len)
+{
+ return skb_pull_data2(sk, len);
+}
+
+/* The global function calls bpf_skb_pull_data(), which invalidates packet
+ * pointers established before the global function call.
+ */
+SEC("tc")
+__failure __msg("invalid mem access")
+int invalidate_pkt_pointers_from_global_func(struct __sk_buff *sk)
+{
+ int *p = (void *)(long)sk->data;
+
+ if ((void *)(p + 1) > (void *)(long)sk->data_end)
+ return TCX_DROP;
+ skb_pull_data1(sk, 0);
+ *p = 42; /* this is unsafe */
+ return TCX_PASS;
+}
+
+__noinline
+int tail_call(struct __sk_buff *sk)
+{
+ bpf_tail_call_static(sk, &jmp_table, 0);
+ return 0;
+}
+
+/* Tail calls invalidate packet pointers. */
+SEC("tc")
+__failure __msg("invalid mem access")
+int invalidate_pkt_pointers_by_tail_call(struct __sk_buff *sk)
+{
+ int *p = (void *)(long)sk->data;
+
+ if ((void *)(p + 1) > (void *)(long)sk->data_end)
+ return TCX_DROP;
+ tail_call(sk);
+ *p = 42; /* this is unsafe */
+ return TCX_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_sock_addr.c b/tools/testing/selftests/bpf/progs/verifier_sock_addr.c
new file mode 100644
index 000000000000..9c31448a0f52
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_sock_addr.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf_sockopt_helpers.h>
+#include "bpf_misc.h"
+
+SEC("cgroup/recvmsg4")
+__success
+int recvmsg4_good_return_code(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/recvmsg4")
+__failure __msg("At program exit the register R0 has smin=0 smax=0 should have been in [1, 1]")
+int recvmsg4_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/recvmsg6")
+__success
+int recvmsg6_good_return_code(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/recvmsg6")
+__failure __msg("At program exit the register R0 has smin=0 smax=0 should have been in [1, 1]")
+int recvmsg6_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/recvmsg_unix")
+__success
+int recvmsg_unix_good_return_code(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/recvmsg_unix")
+__failure __msg("At program exit the register R0 has smin=0 smax=0 should have been in [1, 1]")
+int recvmsg_unix_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/sendmsg4")
+__success
+int sendmsg4_good_return_code_0(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/sendmsg4")
+__success
+int sendmsg4_good_return_code_1(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/sendmsg4")
+__failure __msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]")
+int sendmsg4_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 2;
+}
+
+SEC("cgroup/sendmsg6")
+__success
+int sendmsg6_good_return_code_0(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/sendmsg6")
+__success
+int sendmsg6_good_return_code_1(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/sendmsg6")
+__failure __msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]")
+int sendmsg6_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 2;
+}
+
+SEC("cgroup/sendmsg_unix")
+__success
+int sendmsg_unix_good_return_code_0(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/sendmsg_unix")
+__success
+int sendmsg_unix_good_return_code_1(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/sendmsg_unix")
+__failure __msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]")
+int sendmsg_unix_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 2;
+}
+
+SEC("cgroup/getpeername4")
+__success
+int getpeername4_good_return_code(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/getpeername4")
+__failure __msg("At program exit the register R0 has smin=0 smax=0 should have been in [1, 1]")
+int getpeername4_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/getpeername6")
+__success
+int getpeername6_good_return_code(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/getpeername6")
+__failure __msg("At program exit the register R0 has smin=0 smax=0 should have been in [1, 1]")
+int getpeername6_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/getpeername_unix")
+__success
+int getpeername_unix_good_return_code(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/getpeername_unix")
+__failure __msg("At program exit the register R0 has smin=0 smax=0 should have been in [1, 1]")
+int getpeername_unix_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/getsockname4")
+__success
+int getsockname4_good_return_code(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/getsockname4")
+__failure __msg("At program exit the register R0 has smin=0 smax=0 should have been in [1, 1]")
+int getsockname4_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/getsockname6")
+__success
+int getsockname6_good_return_code(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/getsockname6")
+__failure __msg("At program exit the register R0 has smin=0 smax=0 should have been in [1, 1]")
+int getsockname6_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/getsockname_unix")
+__success
+int getsockname_unix_good_return_code(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/getsockname_unix")
+__failure __msg("At program exit the register R0 has smin=0 smax=0 should have been in [1, 1]")
+int getsockname_unix_unix_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/bind4")
+__success
+int bind4_good_return_code_0(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/bind4")
+__success
+int bind4_good_return_code_1(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/bind4")
+__success
+int bind4_good_return_code_2(struct bpf_sock_addr *ctx)
+{
+ return 2;
+}
+
+SEC("cgroup/bind4")
+__success
+int bind4_good_return_code_3(struct bpf_sock_addr *ctx)
+{
+ return 3;
+}
+
+SEC("cgroup/bind4")
+__failure __msg("At program exit the register R0 has smin=4 smax=4 should have been in [0, 3]")
+int bind4_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 4;
+}
+
+SEC("cgroup/bind6")
+__success
+int bind6_good_return_code_0(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/bind6")
+__success
+int bind6_good_return_code_1(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/bind6")
+__success
+int bind6_good_return_code_2(struct bpf_sock_addr *ctx)
+{
+ return 2;
+}
+
+SEC("cgroup/bind6")
+__success
+int bind6_good_return_code_3(struct bpf_sock_addr *ctx)
+{
+ return 3;
+}
+
+SEC("cgroup/bind6")
+__failure __msg("At program exit the register R0 has smin=4 smax=4 should have been in [0, 3]")
+int bind6_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 4;
+}
+
+SEC("cgroup/connect4")
+__success
+int connect4_good_return_code_0(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/connect4")
+__success
+int connect4_good_return_code_1(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/connect4")
+__failure __msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]")
+int connect4_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 2;
+}
+
+SEC("cgroup/connect6")
+__success
+int connect6_good_return_code_0(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/connect6")
+__success
+int connect6_good_return_code_1(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/connect6")
+__failure __msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]")
+int connect6_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 2;
+}
+
+SEC("cgroup/connect_unix")
+__success
+int connect_unix_good_return_code_0(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+SEC("cgroup/connect_unix")
+__success
+int connect_unix_good_return_code_1(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/connect_unix")
+__failure __msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]")
+int connect_unix_bad_return_code(struct bpf_sock_addr *ctx)
+{
+ return 2;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c b/tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c
new file mode 100644
index 000000000000..fe4b123187b8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include "bpf_misc.h"
+
+#define __always_unused __attribute__((unused))
+
+char _license[] SEC("license") = "GPL";
+
+struct sock {
+} __attribute__((preserve_access_index));
+
+struct bpf_iter__sockmap {
+ union {
+ struct sock *sk;
+ };
+} __attribute__((preserve_access_index));
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKHASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} sockhash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} sockmap SEC(".maps");
+
+enum { CG_OK = 1 };
+
+int zero = 0;
+
+static __always_inline void test_sockmap_delete(void)
+{
+ bpf_map_delete_elem(&sockmap, &zero);
+ bpf_map_delete_elem(&sockhash, &zero);
+}
+
+static __always_inline void test_sockmap_update(void *sk)
+{
+ if (sk) {
+ bpf_map_update_elem(&sockmap, &zero, sk, BPF_ANY);
+ bpf_map_update_elem(&sockhash, &zero, sk, BPF_ANY);
+ }
+}
+
+static __always_inline void test_sockmap_lookup_and_update(void)
+{
+ struct bpf_sock *sk = bpf_map_lookup_elem(&sockmap, &zero);
+
+ if (sk) {
+ test_sockmap_update(sk);
+ bpf_sk_release(sk);
+ }
+}
+
+static __always_inline void test_sockmap_mutate(void *sk)
+{
+ test_sockmap_delete();
+ test_sockmap_update(sk);
+}
+
+static __always_inline void test_sockmap_lookup_and_mutate(void)
+{
+ test_sockmap_delete();
+ test_sockmap_lookup_and_update();
+}
+
+SEC("action")
+__success
+int test_sched_act(struct __sk_buff *skb)
+{
+ test_sockmap_mutate(skb->sk);
+ return 0;
+}
+
+SEC("classifier")
+__success
+int test_sched_cls(struct __sk_buff *skb)
+{
+ test_sockmap_mutate(skb->sk);
+ return 0;
+}
+
+SEC("flow_dissector")
+__success
+int test_flow_dissector_delete(struct __sk_buff *skb __always_unused)
+{
+ test_sockmap_delete();
+ return 0;
+}
+
+SEC("flow_dissector")
+__failure __msg("program of this type cannot use helper bpf_sk_release")
+int test_flow_dissector_update(struct __sk_buff *skb __always_unused)
+{
+ test_sockmap_lookup_and_update(); /* no access to skb->sk */
+ return 0;
+}
+
+SEC("iter/sockmap")
+__success
+int test_trace_iter(struct bpf_iter__sockmap *ctx)
+{
+ test_sockmap_mutate(ctx->sk);
+ return 0;
+}
+
+SEC("raw_tp/kfree")
+__failure __msg("cannot update sockmap in this context")
+int test_raw_tp_delete(const void *ctx __always_unused)
+{
+ test_sockmap_delete();
+ return 0;
+}
+
+SEC("raw_tp/kfree")
+__failure __msg("cannot update sockmap in this context")
+int test_raw_tp_update(const void *ctx __always_unused)
+{
+ test_sockmap_lookup_and_update();
+ return 0;
+}
+
+SEC("sk_lookup")
+__success
+int test_sk_lookup(struct bpf_sk_lookup *ctx)
+{
+ test_sockmap_mutate(ctx->sk);
+ return 0;
+}
+
+SEC("sk_reuseport")
+__success
+int test_sk_reuseport(struct sk_reuseport_md *ctx)
+{
+ test_sockmap_mutate(ctx->sk);
+ return 0;
+}
+
+SEC("socket")
+__success
+int test_socket_filter(struct __sk_buff *skb)
+{
+ test_sockmap_mutate(skb->sk);
+ return 0;
+}
+
+SEC("sockops")
+__success
+int test_sockops_delete(struct bpf_sock_ops *ctx __always_unused)
+{
+ test_sockmap_delete();
+ return CG_OK;
+}
+
+SEC("sockops")
+__failure __msg("cannot update sockmap in this context")
+int test_sockops_update(struct bpf_sock_ops *ctx)
+{
+ test_sockmap_update(ctx->sk);
+ return CG_OK;
+}
+
+SEC("sockops")
+__success
+int test_sockops_update_dedicated(struct bpf_sock_ops *ctx)
+{
+ bpf_sock_map_update(ctx, &sockmap, &zero, BPF_ANY);
+ bpf_sock_hash_update(ctx, &sockhash, &zero, BPF_ANY);
+ return CG_OK;
+}
+
+SEC("xdp")
+__success
+int test_xdp(struct xdp_md *ctx __always_unused)
+{
+ test_sockmap_lookup_and_mutate();
+ return XDP_PASS;
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
new file mode 100644
index 000000000000..1e5a511e8494
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
@@ -0,0 +1,1282 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/spill_fill.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include <../../../tools/include/linux/filter.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096);
+} map_ringbuf SEC(".maps");
+
+SEC("socket")
+__description("check valid spill/fill")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr")
+__retval(POINTER_VALUE)
+__naked void check_valid_spill_fill(void)
+{
+ asm volatile (" \
+ /* spill R1(ctx) into stack */ \
+ *(u64*)(r10 - 8) = r1; \
+ /* fill it back into R2 */ \
+ r2 = *(u64*)(r10 - 8); \
+ /* should be able to access R0 = *(R2 + 8) */ \
+ /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */\
+ r0 = r2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("check valid spill/fill, skb mark")
+__success __success_unpriv __retval(0)
+__naked void valid_spill_fill_skb_mark(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ *(u64*)(r10 - 8) = r6; \
+ r0 = *(u64*)(r10 - 8); \
+ r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
+ exit; \
+" :
+ : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check valid spill/fill, ptr to mem")
+__success __success_unpriv __retval(0)
+__naked void spill_fill_ptr_to_mem(void)
+{
+ asm volatile (" \
+ /* reserve 8 byte ringbuf memory */ \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r1 = %[map_ringbuf] ll; \
+ r2 = 8; \
+ r3 = 0; \
+ call %[bpf_ringbuf_reserve]; \
+ /* store a pointer to the reserved memory in R6 */\
+ r6 = r0; \
+ /* check whether the reservation was successful */\
+ if r0 == 0 goto l0_%=; \
+ /* spill R6(mem) into the stack */ \
+ *(u64*)(r10 - 8) = r6; \
+ /* fill it back in R7 */ \
+ r7 = *(u64*)(r10 - 8); \
+ /* should be able to access *(R7) = 0 */ \
+ r1 = 0; \
+ *(u64*)(r7 + 0) = r1; \
+ /* submit the reserved ringbuf memory */ \
+ r1 = r7; \
+ r2 = 0; \
+ call %[bpf_ringbuf_submit]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ringbuf_reserve),
+ __imm(bpf_ringbuf_submit),
+ __imm_addr(map_ringbuf)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check with invalid reg offset 0")
+__failure __msg("R0 pointer arithmetic on ringbuf_mem_or_null prohibited")
+__failure_unpriv
+__naked void with_invalid_reg_offset_0(void)
+{
+ asm volatile (" \
+ /* reserve 8 byte ringbuf memory */ \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r1 = %[map_ringbuf] ll; \
+ r2 = 8; \
+ r3 = 0; \
+ call %[bpf_ringbuf_reserve]; \
+ /* store a pointer to the reserved memory in R6 */\
+ r6 = r0; \
+ /* add invalid offset to memory or NULL */ \
+ r0 += 1; \
+ /* check whether the reservation was successful */\
+ if r0 == 0 goto l0_%=; \
+ /* should not be able to access *(R7) = 0 */ \
+ r1 = 0; \
+ *(u32*)(r6 + 0) = r1; \
+ /* submit the reserved ringbuf memory */ \
+ r1 = r6; \
+ r2 = 0; \
+ call %[bpf_ringbuf_submit]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ringbuf_reserve),
+ __imm(bpf_ringbuf_submit),
+ __imm_addr(map_ringbuf)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("check corrupted spill/fill")
+__failure __msg("R0 invalid mem access 'scalar'")
+__msg_unpriv("attempt to corrupt spilled")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void check_corrupted_spill_fill(void)
+{
+ asm volatile (" \
+ /* spill R1(ctx) into stack */ \
+ *(u64*)(r10 - 8) = r1; \
+ /* mess up with R1 pointer on stack */ \
+ r0 = 0x23; \
+ *(u8*)(r10 - 7) = r0; \
+ /* fill back into R0 is fine for priv. \
+ * R0 now becomes SCALAR_VALUE. \
+ */ \
+ r0 = *(u64*)(r10 - 8); \
+ /* Load from R0 should fail. */ \
+ r0 = *(u64*)(r0 + 8); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("check corrupted spill/fill, LSB")
+__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
+__retval(POINTER_VALUE)
+__naked void check_corrupted_spill_fill_lsb(void)
+{
+ asm volatile (" \
+ *(u64*)(r10 - 8) = r1; \
+ r0 = 0xcafe; \
+ *(u16*)(r10 - 8) = r0; \
+ r0 = *(u64*)(r10 - 8); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("check corrupted spill/fill, MSB")
+__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
+__retval(POINTER_VALUE)
+__naked void check_corrupted_spill_fill_msb(void)
+{
+ asm volatile (" \
+ *(u64*)(r10 - 8) = r1; \
+ r0 = 0x12345678; \
+ *(u32*)(r10 - 4) = r0; \
+ r0 = *(u64*)(r10 - 8); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("Spill and refill a u32 const scalar. Offset to skb->data")
+__success __retval(0)
+__naked void scalar_offset_to_skb_data_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ w4 = 20; \
+ *(u32*)(r10 - 8) = r4; \
+ r4 = *(u32*)(r10 - 8); \
+ r0 = r2; \
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */ \
+ r0 += r4; \
+ /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
+ if r0 > r3 goto l0_%=; \
+ /* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */\
+ r0 = *(u32*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("Spill a u32 const, refill from another half of the uninit u32 from the stack")
+/* in privileged mode reads from uninitialized stack locations are permitted */
+__success __failure_unpriv
+__msg_unpriv("invalid read from stack off -4+0 size 4")
+__retval(0)
+__naked void uninit_u32_from_the_stack(void)
+{
+ asm volatile (" \
+ w4 = 20; \
+ *(u32*)(r10 - 8) = r4; \
+ /* r4 = *(u32 *)(r10 -4) fp-8=????rrrr*/ \
+ r4 = *(u32*)(r10 - 4); \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("Spill a u32 const scalar. Refill as u16. Offset to skb->data")
+__success __retval(0)
+__naked void u16_offset_to_skb_data(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ w4 = 20; \
+ *(u32*)(r10 - 8) = r4; \
+ "
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r4 = *(u16*)(r10 - 8);"
+#else
+ "r4 = *(u16*)(r10 - 6);"
+#endif
+ " \
+ r0 = r2; \
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */\
+ r0 += r4; \
+ /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
+ if r0 > r3 goto l0_%=; \
+ /* r0 = *(u32 *)r2 R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
+ r0 = *(u32*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
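+/* The 64-bit fill below spans two separate 32-bit spill slots, so r4 is
+ * treated as an unknown scalar and adding it to a packet pointer must be
+ * rejected.
+ */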
+SEC("tc")
+__description("Spill u32 const scalars. Refill as u64. Offset to skb->data")
+__failure __msg("math between pkt pointer and register with unbounded min value is not allowed")
+__naked void u64_offset_to_skb_data(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ w6 = 0; \
+ w7 = 20; \
+ *(u32*)(r10 - 4) = r6; \
+ *(u32*)(r10 - 8) = r7; \
+ r4 = *(u64*)(r10 - 8); \
+ r0 = r2; \
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4= */ \
+ r0 += r4; \
+ if r0 > r3 goto l0_%=; \
+ r0 = *(u32*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("Spill a u32 const scalar. Refill as u16 from MSB. Offset to skb->data")
+__failure __msg("invalid access to packet")
+__naked void _6_offset_to_skb_data(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ w4 = 20; \
+ *(u32*)(r10 - 8) = r4; \
+ "
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r4 = *(u16*)(r10 - 6);"
+#else
+ "r4 = *(u16*)(r10 - 8);"
+#endif
+ " \
+ r0 = r2; \
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
+ r0 += r4; \
+ /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
+ if r0 > r3 goto l0_%=; \
+ /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
+ r0 = *(u32*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data")
+__failure __msg("invalid access to packet")
+__naked void addr_offset_to_skb_data(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ w4 = 20; \
+ *(u32*)(r10 - 8) = r4; \
+ *(u32*)(r10 - 4) = r4; \
+ r4 = *(u32*)(r10 - 4); \
+ r0 = r2; \
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */\
+ r0 += r4; \
+ /* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\
+ if r0 > r3 goto l0_%=; \
+ /* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\
+ r0 = *(u32*)(r2 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("Spill and refill a umax=40 bounded scalar. Offset to skb->data")
+__success __retval(0)
+__naked void scalar_offset_to_skb_data_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r4 = *(u64*)(r1 + %[__sk_buff_tstamp]); \
+ if r4 <= 40 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: /* *(u32 *)(r10 -8) = r4 R4=umax=40 */ \
+ *(u32*)(r10 - 8) = r4; \
+ /* r4 = *(u32 *)(r10 - 8) */ \
+ r4 = *(u32*)(r10 - 8); \
+ /* r2 += r4 R2=pkt R4=umax=40 */ \
+ r2 += r4; \
+ /* r0 = r2 R2=pkt,umax=40 R4=umax=40 */ \
+ r0 = r2; \
+ /* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */ \
+ r2 += 20; \
+ /* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */\
+ if r2 > r3 goto l1_%=; \
+ /* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */\
+ r0 = *(u32*)(r0 + 0); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
+ __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("Spill a u32 scalar at fp-4 and then at fp-8")
+__success __retval(0)
+__naked void and_then_at_fp_8(void)
+{
+ asm volatile (" \
+ w4 = 4321; \
+ *(u32*)(r10 - 4) = r4; \
+ *(u32*)(r10 - 8) = r4; \
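+ /* The 64-bit fill below covers fp-8..fp-1, spanning both 32-bit spills. */\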
+ r4 = *(u64*)(r10 - 8); \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("xdp")
+__description("32-bit spill of 64-bit reg should clear ID")
+__failure __msg("math between ctx pointer and 4294967295 is not allowed")
+__naked void spill_32bit_of_64bit_fail(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ /* Roll one bit to force the verifier to track both branches. */\
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x8; \
+ /* Put a large number into r1. */ \
+ r1 = 0xffffffff; \
+ r1 <<= 32; \
+ r1 += r0; \
+ /* Assign an ID to r1. */ \
+ r2 = r1; \
+ /* 32-bit spill r1 to stack - should clear the ID! */\
+ *(u32*)(r10 - 8) = r1; \
+ /* 32-bit fill r2 from stack. */ \
+ r2 = *(u32*)(r10 - 8); \
+ /* Compare r2 with another register to trigger sync_linked_regs.\
+ * Having one random bit is important here, otherwise the verifier cuts\
+ * the corners. If the ID was mistakenly preserved on spill, this would\
+ * cause the verifier to think that r1 is also equal to zero in one of\
+ * the branches, and equal to eight on the other branch.\
+ */ \
+ r3 = 0; \
+ if r2 != r3 goto l0_%=; \
+l0_%=: r1 >>= 32; \
+ /* At this point, if the verifier thinks that r1 is 0, an out-of-bounds\
+ * read will happen, because it actually contains 0xffffffff.\
+ */ \
+ r6 += r1; \
+ r0 = *(u32*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("16-bit spill of 32-bit reg should clear ID")
+__failure __msg("dereference of modified ctx ptr R6 off=65535 disallowed")
+__naked void spill_16bit_of_32bit_fail(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ /* Roll one bit to force the verifier to track both branches. */\
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x8; \
+ /* Put a large number into r1. */ \
+ w1 = 0xffff0000; \
+ r1 += r0; \
+ /* Assign an ID to r1. */ \
+ r2 = r1; \
+ /* 16-bit spill r1 to stack - should clear the ID! */\
+ *(u16*)(r10 - 8) = r1; \
+ /* 16-bit fill r2 from stack. */ \
+ r2 = *(u16*)(r10 - 8); \
+ /* Compare r2 with another register to trigger sync_linked_regs.\
+ * Having one random bit is important here, otherwise the verifier cuts\
+ * the corners. If the ID was mistakenly preserved on spill, this would\
+ * cause the verifier to think that r1 is also equal to zero in one of\
+ * the branches, and equal to eight on the other branch.\
+ */ \
+ r3 = 0; \
+ if r2 != r3 goto l0_%=; \
+l0_%=: r1 >>= 16; \
+ /* At this point, if the verifier thinks that r1 is 0, an out-of-bounds\
+ * read will happen, because it actually contains 0xffff.\
+ */ \
+ r6 += r1; \
+ r0 = *(u32*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__log_level(2)
+__success
+__msg("fp-8=0m??scalar()")
+__msg("fp-16=00mm??scalar()")
+__msg("fp-24=00mm???scalar()")
+__naked void spill_subregs_preserve_stack_zero(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+
+ /* 32-bit subreg spill with ZERO, MISC, and INVALID */
+ ".8byte %[fp1_u8_st_zero];" /* ZERO, LLVM-18+: *(u8 *)(r10 -1) = 0; */
+ "*(u8 *)(r10 -2) = r0;" /* MISC */
+ /* fp-3 and fp-4 stay INVALID */
+ "*(u32 *)(r10 -8) = r0;"
+
+ /* 16-bit subreg spill with ZERO, MISC, and INVALID */
+ ".8byte %[fp10_u16_st_zero];" /* ZERO, LLVM-18+: *(u16 *)(r10 -10) = 0; */
+ "*(u16 *)(r10 -12) = r0;" /* MISC */
+ /* fp-13 and fp-14 stay INVALID */
+ "*(u16 *)(r10 -16) = r0;"
+
+ /* 8-bit subreg spill with ZERO, MISC, and INVALID */
+ ".8byte %[fp18_u16_st_zero];" /* ZERO, LLVM-18+: *(u16 *)(r18 -10) = 0; */
+ "*(u16 *)(r10 -20) = r0;" /* MISC */
+ /* fp-21, fp-22, and fp-23 stay INVALID */
+ "*(u8 *)(r10 -24) = r0;"
+
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm_insn(fp1_u8_st_zero, BPF_ST_MEM(BPF_B, BPF_REG_FP, -1, 0)),
+ __imm_insn(fp10_u16_st_zero, BPF_ST_MEM(BPF_H, BPF_REG_FP, -10, 0)),
+ __imm_insn(fp18_u16_st_zero, BPF_ST_MEM(BPF_H, BPF_REG_FP, -18, 0))
+ : __clobber_all);
+}
+
+char single_byte_buf[1] SEC(".data.single_byte_buf");
+
+SEC("raw_tp")
+__log_level(2)
+__success
+/* fp-8 is spilled IMPRECISE value zero (represented by a zero value fake reg) */
+__msg("2: (7a) *(u64 *)(r10 -8) = 0 ; R10=fp0 fp-8_w=0")
+/* but fp-16 is spilled IMPRECISE zero const reg */
+__msg("4: (7b) *(u64 *)(r10 -16) = r0 ; R0_w=0 R10=fp0 fp-16_w=0")
+/* validate that assigning R2 from STACK_SPILL with zero value doesn't mark register
+ * precise immediately; if necessary, it will be marked precise later
+ */
+__msg("6: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8_w=0")
+/* similarly, when R2 is assigned from spilled register, it is initially
+ * imprecise, but will be marked precise later once it is used in precise context
+ */
+__msg("10: (71) r2 = *(u8 *)(r10 -9) ; R2_w=0 R10=fp0 fp-16_w=0")
+__msg("11: (0f) r1 += r2")
+__msg("mark_precise: frame0: last_idx 11 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 10: (71) r2 = *(u8 *)(r10 -9)")
+__msg("mark_precise: frame0: regs= stack=-16 before 9: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs= stack=-16 before 8: (73) *(u8 *)(r1 +0) = r2")
+__msg("mark_precise: frame0: regs= stack=-16 before 7: (0f) r1 += r2")
+__msg("mark_precise: frame0: regs= stack=-16 before 6: (71) r2 = *(u8 *)(r10 -1)")
+__msg("mark_precise: frame0: regs= stack=-16 before 5: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs= stack=-16 before 4: (7b) *(u64 *)(r10 -16) = r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 3: (b7) r0 = 0")
+__naked void partial_stack_load_preserves_zeros(void)
+{
+ asm volatile (
+ /* fp-8 is value zero (represented by a zero value fake reg) */
+ ".8byte %[fp8_st_zero];" /* LLVM-18+: *(u64 *)(r10 -8) = 0; */
+
+ /* fp-16 is const zero register */
+ "r0 = 0;"
+ "*(u64 *)(r10 -16) = r0;"
+
+ /* load single U8 from non-aligned spilled value zero slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u8 *)(r10 -1);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ /* load single U8 from non-aligned ZERO REG slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u8 *)(r10 -9);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ /* load single U16 from non-aligned spilled value zero slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u16 *)(r10 -2);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ /* load single U16 from non-aligned ZERO REG slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u16 *)(r10 -10);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ /* load single U32 from non-aligned spilled value zero slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u32 *)(r10 -4);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ /* load single U32 from non-aligned ZERO REG slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u32 *)(r10 -12);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ /* for completeness, load U64 from STACK_ZERO slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u64 *)(r10 -8);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ /* for completeness, load U64 from ZERO REG slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u64 *)(r10 -16);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_ptr(single_byte_buf),
+ __imm_insn(fp8_st_zero, BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0))
+ : __clobber_common);
+}
+
+SEC("raw_tp")
+__log_level(2)
+__success
+/* fp-4 is STACK_ZERO */
+__msg("2: (62) *(u32 *)(r10 -4) = 0 ; R10=fp0 fp-8=0000????")
+__msg("4: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8=0000????")
+__msg("5: (0f) r1 += r2")
+__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 4: (71) r2 = *(u8 *)(r10 -1)")
+__naked void partial_stack_load_preserves_partial_zeros(void)
+{
+ asm volatile (
+ /* fp-4 is value zero */
+ ".8byte %[fp4_st_zero];" /* LLVM-18+: *(u32 *)(r10 -4) = 0; */
+
+ /* load single U8 from non-aligned stack zero slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u8 *)(r10 -1);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ /* load single U16 from non-aligned stack zero slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u16 *)(r10 -2);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ /* load single U32 from non-aligned stack zero slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u32 *)(r10 -4);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_ptr(single_byte_buf),
+ __imm_insn(fp4_st_zero, BPF_ST_MEM(BPF_W, BPF_REG_FP, -4, 0))
+ : __clobber_common);
+}
+
+char two_byte_buf[2] SEC(".data.two_byte_buf");
+
+SEC("raw_tp")
+__log_level(2) __flag(BPF_F_TEST_STATE_FREQ)
+__success
+/* make sure fp-8 is IMPRECISE fake register spill */
+__msg("3: (7a) *(u64 *)(r10 -8) = 1 ; R10=fp0 fp-8_w=1")
+/* and fp-16 is spilled IMPRECISE const reg */
+__msg("5: (7b) *(u64 *)(r10 -16) = r0 ; R0_w=1 R10=fp0 fp-16_w=1")
+/* validate load from fp-8, which was initialized using BPF_ST_MEM */
+__msg("8: (79) r2 = *(u64 *)(r10 -8) ; R2_w=1 R10=fp0 fp-8=1")
+__msg("9: (0f) r1 += r2")
+__msg("mark_precise: frame0: last_idx 9 first_idx 7 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 8: (79) r2 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r1 = r6")
+/* note, fp-8 is precise, fp-16 is not yet precise, we'll get there */
+__msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_rw=P1 fp-16_w=1")
+__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
+__msg("mark_precise: frame0: regs= stack=-8 before 6: (05) goto pc+0")
+__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -16) = r0")
+__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r0 = 1")
+__msg("mark_precise: frame0: regs= stack=-8 before 3: (7a) *(u64 *)(r10 -8) = 1")
+__msg("10: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
+/* validate load from fp-16, which was initialized using BPF_STX_MEM */
+__msg("12: (79) r2 = *(u64 *)(r10 -16) ; R2_w=1 R10=fp0 fp-16=1")
+__msg("13: (0f) r1 += r2")
+__msg("mark_precise: frame0: last_idx 13 first_idx 7 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 12: (79) r2 = *(u64 *)(r10 -16)")
+__msg("mark_precise: frame0: regs= stack=-16 before 11: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs= stack=-16 before 10: (73) *(u8 *)(r1 +0) = r2")
+__msg("mark_precise: frame0: regs= stack=-16 before 9: (0f) r1 += r2")
+__msg("mark_precise: frame0: regs= stack=-16 before 8: (79) r2 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: regs= stack=-16 before 7: (bf) r1 = r6")
+/* now both fp-8 and fp-16 are precise, very good */
+__msg("mark_precise: frame0: parent state regs= stack=-16: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_rw=P1 fp-16_rw=P1")
+__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
+__msg("mark_precise: frame0: regs= stack=-16 before 6: (05) goto pc+0")
+__msg("mark_precise: frame0: regs= stack=-16 before 5: (7b) *(u64 *)(r10 -16) = r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (b7) r0 = 1")
+__msg("14: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
+__naked void stack_load_preserves_const_precision(void)
+{
+ asm volatile (
+ /* establish checkpoint with state that has no stack slots;
+ * if we bubble up to this state without finding desired stack
+ * slot, then it's a bug and should be caught
+ */
+ "goto +0;"
+
+ /* fp-8 is const 1 *fake* register */
+ ".8byte %[fp8_st_one];" /* LLVM-18+: *(u64 *)(r10 -8) = 1; */
+
+ /* fp-16 is const 1 register */
+ "r0 = 1;"
+ "*(u64 *)(r10 -16) = r0;"
+
+ /* force checkpoint to check precision marks preserved in parent states */
+ "goto +0;"
+
+ /* load single U64 from aligned FAKE_REG=1 slot */
+ "r1 = %[two_byte_buf];"
+ "r2 = *(u64 *)(r10 -8);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ /* load single U64 from aligned REG=1 slot */
+ "r1 = %[two_byte_buf];"
+ "r2 = *(u64 *)(r10 -16);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_ptr(two_byte_buf),
+ __imm_insn(fp8_st_one, BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 1))
+ : __clobber_common);
+}
+
+SEC("raw_tp")
+__log_level(2) __flag(BPF_F_TEST_STATE_FREQ)
+__success
+/* make sure fp-8 is 32-bit FAKE subregister spill */
+__msg("3: (62) *(u32 *)(r10 -8) = 1 ; R10=fp0 fp-8=????1")
+/* but fp-16 is spilled IMPRECISE const reg */
+__msg("5: (63) *(u32 *)(r10 -16) = r0 ; R0_w=1 R10=fp0 fp-16=????1")
+/* validate load from fp-8, which was initialized using BPF_ST_MEM */
+__msg("8: (61) r2 = *(u32 *)(r10 -8) ; R2_w=1 R10=fp0 fp-8=????1")
+__msg("9: (0f) r1 += r2")
+__msg("mark_precise: frame0: last_idx 9 first_idx 7 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 8: (61) r2 = *(u32 *)(r10 -8)")
+__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r1 = r6")
+__msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_r=????P1 fp-16=????1")
+__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
+__msg("mark_precise: frame0: regs= stack=-8 before 6: (05) goto pc+0")
+__msg("mark_precise: frame0: regs= stack=-8 before 5: (63) *(u32 *)(r10 -16) = r0")
+__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r0 = 1")
+__msg("mark_precise: frame0: regs= stack=-8 before 3: (62) *(u32 *)(r10 -8) = 1")
+__msg("10: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
+/* validate load from fp-16, which was initialized using BPF_STX_MEM */
+__msg("12: (61) r2 = *(u32 *)(r10 -16) ; R2_w=1 R10=fp0 fp-16=????1")
+__msg("13: (0f) r1 += r2")
+__msg("mark_precise: frame0: last_idx 13 first_idx 7 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 12: (61) r2 = *(u32 *)(r10 -16)")
+__msg("mark_precise: frame0: regs= stack=-16 before 11: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs= stack=-16 before 10: (73) *(u8 *)(r1 +0) = r2")
+__msg("mark_precise: frame0: regs= stack=-16 before 9: (0f) r1 += r2")
+__msg("mark_precise: frame0: regs= stack=-16 before 8: (61) r2 = *(u32 *)(r10 -8)")
+__msg("mark_precise: frame0: regs= stack=-16 before 7: (bf) r1 = r6")
+__msg("mark_precise: frame0: parent state regs= stack=-16: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_r=????P1 fp-16_r=????P1")
+__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
+__msg("mark_precise: frame0: regs= stack=-16 before 6: (05) goto pc+0")
+__msg("mark_precise: frame0: regs= stack=-16 before 5: (63) *(u32 *)(r10 -16) = r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (b7) r0 = 1")
+__msg("14: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
+__naked void stack_load_preserves_const_precision_subreg(void)
+{
+ asm volatile (
+ /* establish checkpoint with state that has no stack slots;
+ * if we bubble up to this state without finding desired stack
+ * slot, then it's a bug and should be caught
+ */
+ "goto +0;"
+
+ /* fp-8 is const 1 *fake* SUB-register */
+ ".8byte %[fp8_st_one];" /* LLVM-18+: *(u32 *)(r10 -8) = 1; */
+
+ /* fp-16 is const 1 SUB-register */
+ "r0 = 1;"
+ "*(u32 *)(r10 -16) = r0;"
+
+ /* force checkpoint to check precision marks preserved in parent states */
+ "goto +0;"
+
+ /* load single U32 from aligned FAKE_REG=1 slot */
+ "r1 = %[two_byte_buf];"
+ "r2 = *(u32 *)(r10 -8);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ /* load single U32 from aligned REG=1 slot */
+ "r1 = %[two_byte_buf];"
+ "r2 = *(u32 *)(r10 -16);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_ptr(two_byte_buf),
+ __imm_insn(fp8_st_one, BPF_ST_MEM(BPF_W, BPF_REG_FP, -8, 1)) /* 32-bit spill */
+ : __clobber_common);
+}
+
+SEC("xdp")
+__description("32-bit spilled reg range should be tracked")
+__success __retval(0)
+__naked void spill_32bit_range_track(void)
+{
+ asm volatile(" \
+ call %[bpf_ktime_get_ns]; \
+ /* Make r0 bounded. */ \
+ r0 &= 65535; \
+ /* Assign an ID to r0. */ \
+ r1 = r0; \
+ /* 32-bit spill r0 to stack. */ \
+ *(u32*)(r10 - 8) = r0; \
+ /* Boundary check on r0. */ \
+ if r0 < 1 goto l0_%=; \
+ /* 32-bit fill r1 from stack. */ \
+ r1 = *(u32*)(r10 - 8); \
+ /* r1 == r0 => r1 >= 1 always. */ \
+ if r1 >= 1 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. \
+ * Do an invalid memory access if the verifier \
+ * follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("64-bit spill of 64-bit reg should assign ID")
+__success __retval(0)
+__naked void spill_64bit_of_64bit_ok(void)
+{
+ asm volatile (" \
+ /* Roll one bit to make the register inexact. */\
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x80000000; \
+ r0 <<= 32; \
+ /* 64-bit spill r0 to stack - should assign an ID. */\
+ *(u64*)(r10 - 8) = r0; \
+ /* 64-bit fill r1 from stack - should preserve the ID. */\
+ r1 = *(u64*)(r10 - 8); \
+ /* Compare r1 with another register to trigger sync_linked_regs.\
+ * Having one random bit is important here, otherwise the verifier cuts\
+ * the corners. \
+ */ \
+ r2 = 0; \
+ if r1 != r2 goto l0_%=; \
+ /* The result of this comparison is predefined. */\
+ if r0 == r2 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("32-bit spill of 32-bit reg should assign ID")
+__success __retval(0)
+__naked void spill_32bit_of_32bit_ok(void)
+{
+ asm volatile (" \
+ /* Roll one bit to make the register inexact. */\
+ call %[bpf_get_prandom_u32]; \
+ w0 &= 0x80000000; \
+ /* 32-bit spill r0 to stack - should assign an ID. */\
+ *(u32*)(r10 - 8) = r0; \
+ /* 32-bit fill r1 from stack - should preserve the ID. */\
+ r1 = *(u32*)(r10 - 8); \
+ /* Compare r1 with another register to trigger sync_linked_regs.\
+ * Having one random bit is important here, otherwise the verifier cuts\
+ * the corners. \
+ */ \
+ r2 = 0; \
+ if r1 != r2 goto l0_%=; \
+ /* The result of this comparison is predefined. */\
+ if r0 == r2 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("16-bit spill of 16-bit reg should assign ID")
+__success __retval(0)
+__naked void spill_16bit_of_16bit_ok(void)
+{
+ asm volatile (" \
+ /* Roll one bit to make the register inexact. */\
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x8000; \
+ /* 16-bit spill r0 to stack - should assign an ID. */\
+ *(u16*)(r10 - 8) = r0; \
+ /* 16-bit fill r1 from stack - should preserve the ID. */\
+ r1 = *(u16*)(r10 - 8); \
+ /* Compare r1 with another register to trigger sync_linked_regs.\
+ * Having one random bit is important here, otherwise the verifier cuts\
+ * the corners. \
+ */ \
+ r2 = 0; \
+ if r1 != r2 goto l0_%=; \
+ /* The result of this comparison is predefined. */\
+ if r0 == r2 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("8-bit spill of 8-bit reg should assign ID")
+__success __retval(0)
+__naked void spill_8bit_of_8bit_ok(void)
+{
+ asm volatile (" \
+ /* Roll one bit to make the register inexact. */\
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x80; \
+ /* 8-bit spill r0 to stack - should assign an ID. */\
+ *(u8*)(r10 - 8) = r0; \
+ /* 8-bit fill r1 from stack - should preserve the ID. */\
+ r1 = *(u8*)(r10 - 8); \
+ /* Compare r1 with another register to trigger sync_linked_regs.\
+ * Having one random bit is important here, otherwise the verifier cuts\
+ * the corners. \
+ */ \
+ r2 = 0; \
+ if r1 != r2 goto l0_%=; \
+ /* The result of this comparison is predefined. */\
+ if r0 == r2 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("spill unbounded reg, then range check src")
+__success __retval(0)
+__naked void spill_unbounded(void)
+{
+ asm volatile (" \
+ /* Produce an unbounded scalar. */ \
+ call %[bpf_get_prandom_u32]; \
+ /* Spill r0 to stack. */ \
+ *(u64*)(r10 - 8) = r0; \
+ /* Boundary check on r0. */ \
+ if r0 > 16 goto l0_%=; \
+ /* Fill r0 from stack. */ \
+ r0 = *(u64*)(r10 - 8); \
+ /* Boundary check on r0 with predetermined result. */\
+ if r0 <= 16 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("32-bit fill after 64-bit spill")
+__success __retval(0)
+__naked void fill_32bit_after_spill_64bit(void)
+{
+ asm volatile(" \
+ /* Randomize the upper 32 bits. */ \
+ call %[bpf_get_prandom_u32]; \
+ r0 <<= 32; \
+ /* 64-bit spill r0 to stack. */ \
+ *(u64*)(r10 - 8) = r0; \
+ /* 32-bit fill r0 from stack. */ \
+ "
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r0 = *(u32*)(r10 - 8);"
+#else
+ "r0 = *(u32*)(r10 - 4);"
+#endif
+ " \
+ /* Boundary check on r0 with predetermined result. */\
+ if r0 == 0 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("32-bit fill after 64-bit spill of 32-bit value should preserve ID")
+__success __retval(0)
+__naked void fill_32bit_after_spill_64bit_preserve_id(void)
+{
+ asm volatile (" \
+ /* Randomize the lower 32 bits. */ \
+ call %[bpf_get_prandom_u32]; \
+ w0 &= 0xffffffff; \
+ /* 64-bit spill r0 to stack - should assign an ID. */\
+ *(u64*)(r10 - 8) = r0; \
+ /* 32-bit fill r1 from stack - should preserve the ID. */\
+ "
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r1 = *(u32*)(r10 - 8);"
+#else
+ "r1 = *(u32*)(r10 - 4);"
+#endif
+ " \
+ /* Compare r1 with another register to trigger sync_linked_regs. */\
+ r2 = 0; \
+ if r1 != r2 goto l0_%=; \
+ /* The result of this comparison is predefined. */\
+ if r0 == r2 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("32-bit fill after 64-bit spill should clear ID")
+__failure __msg("math between ctx pointer and 4294967295 is not allowed")
+__naked void fill_32bit_after_spill_64bit_clear_id(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ /* Roll one bit to force the verifier to track both branches. */\
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x8; \
+ /* Put a large number into r1. */ \
+ r1 = 0xffffffff; \
+ r1 <<= 32; \
+ r1 += r0; \
+ /* 64-bit spill r1 to stack - should assign an ID. */\
+ *(u64*)(r10 - 8) = r1; \
+ /* 32-bit fill r2 from stack - should clear the ID. */\
+ "
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r2 = *(u32*)(r10 - 8);"
+#else
+ "r2 = *(u32*)(r10 - 4);"
+#endif
+ " \
+ /* Compare r2 with another register to trigger sync_linked_regs.\
+ * Having one random bit is important here, otherwise the verifier cuts\
+ * the corners. If the ID was mistakenly preserved on fill, this would\
+ * cause the verifier to think that r1 is also equal to zero in one of\
+ * the branches, and equal to eight on the other branch.\
+ */ \
+ r3 = 0; \
+ if r2 != r3 goto l0_%=; \
+l0_%=: r1 >>= 32; \
+ /* The verifier shouldn't propagate r2's range to r1, so it should\
+ * still remember r1 = 0xffffffff and reject the below.\
+ */ \
+ r6 += r1; \
+ r0 = *(u32*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/* stacksafe(): check if stack spill of an imprecise scalar in old state
+ * is considered equivalent to STACK_{MISC,INVALID} in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("8: (79) r1 = *(u64 *)(r10 -8)")
+__msg("8: safe")
+__msg("processed 11 insns")
+/* STACK_INVALID should prevent verifier in unpriv mode from
+ * considering states equivalent and force an error on second
+ * verification path (entry - label 1 - label 2).
+ */
+__failure_unpriv
+__msg_unpriv("8: (79) r1 = *(u64 *)(r10 -8)")
+__msg_unpriv("9: (95) exit")
+__msg_unpriv("8: (79) r1 = *(u64 *)(r10 -8)")
+__msg_unpriv("invalid read from stack off -8+2 size 8")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_imprecise_scalar_vs_cur_stack_misc(void)
+{
+ asm volatile(
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure scalar at fp-8 */
+ "r0 = 42;"
+ "*(u64*)(r10 - 8) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure STACK_{MISC,INVALID} at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u16*)(r10 - 8) = r0;"
+ "*(u16*)(r10 - 4) = r0;"
+"2:"
+ /* read fp-8, should be considered safe on second visit */
+ "r1 = *(u64*)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* stacksafe(): check that stack spill of a precise scalar in old state
+ * is not considered equivalent to STACK_MISC in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+/* verifier should visit 'if r1 == 0x2a ...' two times:
+ * - once for path entry - label 2;
+ * - once for path entry - label 1 - label 2.
+ */
+__msg("if r1 == 0x2a goto pc+0")
+__msg("if r1 == 0x2a goto pc+0")
+__msg("processed 15 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_precise_scalar_vs_cur_stack_misc(void)
+{
+ asm volatile(
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure scalar at fp-8 */
+ "r0 = 42;"
+ "*(u64*)(r10 - 8) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure STACK_MISC at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u64*)(r10 - 8) = r0;"
+ "*(u32*)(r10 - 4) = r0;"
+"2:"
+ /* read fp-8, should not be considered safe on second visit */
+ "r1 = *(u64*)(r10 - 8);"
+ /* use r1 in precise context */
+ "if r1 == 42 goto +0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* stacksafe(): check if STACK_MISC in old state is considered
+ * equivalent to stack spill of a scalar in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("8: (79) r0 = *(u64 *)(r10 -8)")
+__msg("8: safe")
+__msg("processed 11 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_stack_misc_vs_cur_scalar(void)
+{
+ asm volatile(
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure STACK_{MISC,INVALID} at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u16*)(r10 - 8) = r0;"
+ "*(u16*)(r10 - 4) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure scalar at fp-8 */
+ "r0 = 42;"
+ "*(u64*)(r10 - 8) = r0;"
+"2:"
+ /* read fp-8, should be considered safe on second visit */
+ "r0 = *(u64*)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* stacksafe(): check that STACK_MISC in old state is not considered
+ * equivalent to stack spill of a non-scalar in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+/* verifier should process exit instructions twice:
+ * - once for path entry - label 2;
+ * - once for path entry - label 1 - label 2.
+ */
+__msg("8: (79) r1 = *(u64 *)(r10 -8)")
+__msg("9: (95) exit")
+__msg("from 2 to 7")
+__msg("8: safe")
+__msg("processed 11 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_stack_misc_vs_cur_ctx_ptr(void)
+{
+ asm volatile(
+ /* remember context pointer in r9 */
+ "r9 = r1;"
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure STACK_MISC at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u64*)(r10 - 8) = r0;"
+ "*(u32*)(r10 - 4) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure context pointer in fp-8 */
+ "*(u64*)(r10 - 8) = r9;"
+"2:"
+ /* read fp-8, should not be considered safe on second visit */
+ "r1 = *(u64*)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("stack_noperfmon: reject read of invalid slots")
+__success
+__caps_unpriv(CAP_BPF)
+__failure_unpriv __msg_unpriv("invalid read from stack off -8+1 size 8")
+__naked void stack_noperfmon_reject_invalid_read(void)
+{
+ asm volatile (" \
+ r2 = 1; \
+ r6 = r10; \
+ r6 += -8; \
+ *(u8 *)(r6 + 0) = r2; \
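+ /* Only the byte at fp-8 is initialized; the 8-byte read below also \
+ * touches fp-7..fp-1, which stay STACK_INVALID. \
+ */ \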
+ r2 = *(u64 *)(r6 + 0); \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("stack_noperfmon: narrow spill onto 64-bit scalar spilled slots")
+__success
+__caps_unpriv(CAP_BPF)
+__success_unpriv
+__naked void stack_noperfmon_spill_32bit_onto_64bit_slot(void)
+{
+ asm volatile(" \
+ r0 = 0; \
+ *(u64 *)(r10 - 8) = r0; \
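+ /* Narrow 32-bit overwrite of the 64-bit spilled slot should be \
+ * accepted even without CAP_PERFMON. \
+ */ \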
+ *(u32 *)(r10 - 8) = r0; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
new file mode 100644
index 000000000000..d9d7b05cf6d2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
@@ -0,0 +1,559 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/spin_lock.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct val {
+ int cnt;
+ struct bpf_spin_lock l;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct val);
+} map_spin_lock SEC(".maps");
+
+SEC("cgroup/skb")
+__description("spin_lock: test1 success")
+__success __failure_unpriv __msg_unpriv("")
+__retval(0)
+__naked void spin_lock_test1_success(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ r1 += 4; \
+ call %[bpf_spin_lock]; \
+ r1 = r6; \
+ r1 += 4; \
+ r0 = *(u32*)(r6 + 0); \
+ call %[bpf_spin_unlock]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm(bpf_spin_unlock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("spin_lock: test2 direct ld/st")
+__failure __msg("cannot be accessed directly")
+__failure_unpriv __msg_unpriv("")
+__naked void lock_test2_direct_ld_st(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ r1 += 4; \
+ call %[bpf_spin_lock]; \
+ r1 = r6; \
+ r1 += 4; \
+ r0 = *(u32*)(r1 + 0); \
+ call %[bpf_spin_unlock]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm(bpf_spin_unlock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("spin_lock: test3 direct ld/st")
+__failure __msg("cannot be accessed directly")
+__failure_unpriv __msg_unpriv("")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void lock_test3_direct_ld_st(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ r1 += 4; \
+ call %[bpf_spin_lock]; \
+ r1 = r6; \
+ r1 += 4; \
+ r0 = *(u32*)(r6 + 1); \
+ call %[bpf_spin_unlock]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm(bpf_spin_unlock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("spin_lock: test4 direct ld/st")
+__failure __msg("cannot be accessed directly")
+__failure_unpriv __msg_unpriv("")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void lock_test4_direct_ld_st(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ r1 += 4; \
+ call %[bpf_spin_lock]; \
+ r1 = r6; \
+ r1 += 4; \
+ r0 = *(u16*)(r6 + 3); \
+ call %[bpf_spin_unlock]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm(bpf_spin_unlock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("spin_lock: test5 call within a locked region")
+__failure __msg("calls are not allowed")
+__failure_unpriv __msg_unpriv("")
+__naked void call_within_a_locked_region(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ r1 += 4; \
+ call %[bpf_spin_lock]; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = r6; \
+ r1 += 4; \
+ call %[bpf_spin_unlock]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm(bpf_spin_unlock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("spin_lock: test6 missing unlock")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_spin_lock-ed region")
+__failure_unpriv __msg_unpriv("")
+__naked void spin_lock_test6_missing_unlock(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ r1 += 4; \
+ call %[bpf_spin_lock]; \
+ r1 = r6; \
+ r1 += 4; \
+ r0 = *(u32*)(r6 + 0); \
+ if r0 != 0 goto l1_%=; \
+ call %[bpf_spin_unlock]; \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm(bpf_spin_unlock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("spin_lock: test7 unlock without lock")
+__failure __msg("without taking a lock")
+__failure_unpriv __msg_unpriv("")
+__naked void lock_test7_unlock_without_lock(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ r1 += 4; \
+ if r1 != 0 goto l1_%=; \
+ call %[bpf_spin_lock]; \
+l1_%=: r1 = r6; \
+ r1 += 4; \
+ r0 = *(u32*)(r6 + 0); \
+ call %[bpf_spin_unlock]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm(bpf_spin_unlock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("spin_lock: test8 double lock")
+__failure __msg("calls are not allowed")
+__failure_unpriv __msg_unpriv("")
+__naked void spin_lock_test8_double_lock(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ r1 += 4; \
+ call %[bpf_spin_lock]; \
+ r1 = r6; \
+ r1 += 4; \
+ call %[bpf_spin_lock]; \
+ r1 = r6; \
+ r1 += 4; \
+ r0 = *(u32*)(r6 + 0); \
+ call %[bpf_spin_unlock]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm(bpf_spin_unlock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("spin_lock: test9 different lock")
+__failure __msg("unlock of different lock")
+__failure_unpriv __msg_unpriv("")
+__naked void spin_lock_test9_different_lock(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l1_%=; \
+ exit; \
+l1_%=: r7 = r0; \
+ r1 = r6; \
+ r1 += 4; \
+ call %[bpf_spin_lock]; \
+ r1 = r7; \
+ r1 += 4; \
+ call %[bpf_spin_unlock]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm(bpf_spin_unlock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("spin_lock: test10 lock in subprog without unlock")
+__success
+__failure_unpriv __msg_unpriv("")
+__naked void lock_in_subprog_without_unlock(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r6 = r0; \
+ r1 = r0; \
+ r1 += 4; \
+ call lock_in_subprog_without_unlock__1; \
+ r1 = r6; \
+ r1 += 4; \
+ call %[bpf_spin_unlock]; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_unlock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+static __naked __noinline __attribute__((used))
+void lock_in_subprog_without_unlock__1(void)
+{
+ asm volatile (" \
+ call %[bpf_spin_lock]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_spin_lock)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("spin_lock: test11 ld_abs under lock")
+__failure __msg("inside bpf_spin_lock")
+__naked void test11_ld_abs_under_lock(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r7 = r0; \
+ r1 = r0; \
+ r1 += 4; \
+ call %[bpf_spin_lock]; \
+ r0 = *(u8*)skb[0]; \
+ r1 = r7; \
+ r1 += 4; \
+ call %[bpf_spin_unlock]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm(bpf_spin_unlock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("spin_lock: regsafe compare reg->id for map value")
+__failure __msg("bpf_spin_unlock of different lock")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void reg_id_for_map_value(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r6 = *(u32*)(r6 + %[__sk_buff_mark]); \
+ r1 = %[map_spin_lock] ll; \
+ r9 = r1; \
+ r2 = 0; \
+ *(u32*)(r10 - 4) = r2; \
+ r2 = r10; \
+ r2 += -4; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r7 = r0; \
+ r1 = r9; \
+ r2 = r10; \
+ r2 += -4; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l1_%=; \
+ exit; \
+l1_%=: r8 = r0; \
+ r1 = r7; \
+ r1 += 4; \
+ call %[bpf_spin_lock]; \
+ if r6 == 0 goto l2_%=; \
+ goto l3_%=; \
+l2_%=: r7 = r8; \
+l3_%=: r1 = r7; \
+ r1 += 4; \
+ call %[bpf_spin_unlock]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm(bpf_spin_unlock),
+ __imm_addr(map_spin_lock),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+/* Make sure that regsafe() compares ids for spin lock records using
+ * check_ids():
+ * 1: r9 = map_lookup_elem(...) ; r9.id == 1
+ * 2: r8 = map_lookup_elem(...) ; r8.id == 2
+ * 3: r7 = ktime_get_ns()
+ * 4: r6 = ktime_get_ns()
+ * 5: if r6 > r7 goto <9>
+ * 6: spin_lock(r8)
+ * 7: r9 = r8
+ * 8: goto <10>
+ * 9: spin_lock(r9)
+ * 10: spin_unlock(r9) ; r9.id == 1 || r9.id == 2 and lock is active,
+ * ; second visit to (10) should be considered safe
+ * ; if check_ids() is used.
+ * 11: exit(0)
+ */
+
+SEC("cgroup/skb")
+__description("spin_lock: regsafe() check_ids() similar id mappings")
+__success __msg("29: safe")
+__failure_unpriv __msg_unpriv("")
+__log_level(2) __retval(0) __flag(BPF_F_TEST_STATE_FREQ)
+__naked void check_ids_similar_id_mappings(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ /* r9 = map_lookup_elem(...) */ \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r9 = r0; \
+ /* r8 = map_lookup_elem(...) */ \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_spin_lock] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l1_%=; \
+ r8 = r0; \
+ /* r7 = ktime_get_ns() */ \
+ call %[bpf_ktime_get_ns]; \
+ r7 = r0; \
+ /* r6 = ktime_get_ns() */ \
+ call %[bpf_ktime_get_ns]; \
+ r6 = r0; \
+ /* if r6 > r7 goto +5 ; no new information about the state is derived from\
+ * ; this check, thus produced verifier states differ\
+ * ; only in 'insn_idx' \
+ * spin_lock(r8) \
+ * r9 = r8 \
+ * goto unlock \
+ */ \
+ if r6 > r7 goto l2_%=; \
+ r1 = r8; \
+ r1 += 4; \
+ call %[bpf_spin_lock]; \
+ r9 = r8; \
+ goto l3_%=; \
+l2_%=: /* spin_lock(r9) */ \
+ r1 = r9; \
+ r1 += 4; \
+ call %[bpf_spin_lock]; \
+l3_%=: /* spin_unlock(r9) */ \
+ r1 = r9; \
+ r1 += 4; \
+ call %[bpf_spin_unlock]; \
+l0_%=: /* exit(0) */ \
+ r0 = 0; \
+l1_%=: exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm(bpf_spin_lock),
+ __imm(bpf_spin_unlock),
+ __imm_addr(map_spin_lock)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("spin_lock: loop within a locked region")
+__success __failure_unpriv __msg_unpriv("")
+__retval(0)
+int bpf_loop_inside_locked_region(void)
+{
+ const int zero = 0;
+ struct val *val;
+ int i, j = 0;
+
+ val = bpf_map_lookup_elem(&map_spin_lock, &zero);
+ if (!val)
+ return -1;
+
+ bpf_spin_lock(&val->l);
+ bpf_for(i, 0, 10) {
+ j++;
+ /* Silence "unused variable" warnings. */
+ if (j == 10)
+ break;
+ }
+ bpf_spin_unlock(&val->l);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c b/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c
new file mode 100644
index 000000000000..24aabc6083fd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/stack_ptr.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <limits.h>
+#include "bpf_misc.h"
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct test_val);
+} map_array_48b SEC(".maps");
+
+SEC("socket")
+__description("PTR_TO_STACK store/load")
+__success __success_unpriv __retval(0xfaceb00c)
+__naked void ptr_to_stack_store_load(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -10; \
+ r0 = 0xfaceb00c; \
+ *(u64*)(r1 + 2) = r0; \
+ r0 = *(u64*)(r1 + 2); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK store/load - bad alignment on off")
+__failure __msg("misaligned stack access off 0+-8+2 size 8")
+__failure_unpriv
+__naked void load_bad_alignment_on_off(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -8; \
+ r0 = 0xfaceb00c; \
+ *(u64*)(r1 + 2) = r0; \
+ r0 = *(u64*)(r1 + 2); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK store/load - bad alignment on reg")
+__failure __msg("misaligned stack access off 0+-10+8 size 8")
+__failure_unpriv
+__naked void load_bad_alignment_on_reg(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -10; \
+ r0 = 0xfaceb00c; \
+ *(u64*)(r1 + 8) = r0; \
+ r0 = *(u64*)(r1 + 8); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK store/load - out of bounds low")
+__failure __msg("invalid write to stack R1 off=-79992 size=8")
+__msg_unpriv("R1 stack pointer arithmetic goes out of range")
+__naked void load_out_of_bounds_low(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -80000; \
+ r0 = 0xfaceb00c; \
+ *(u64*)(r1 + 8) = r0; \
+ r0 = *(u64*)(r1 + 8); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK store/load - out of bounds high")
+__failure __msg("invalid write to stack R1 off=0 size=8")
+__failure_unpriv
+__naked void load_out_of_bounds_high(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -8; \
+ r0 = 0xfaceb00c; \
+ *(u64*)(r1 + 8) = r0; \
+ r0 = *(u64*)(r1 + 8); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK check high 1")
+__success __success_unpriv __retval(42)
+__naked void to_stack_check_high_1(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -1; \
+ r0 = 42; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = *(u8*)(r1 + 0); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK check high 2")
+__success __success_unpriv __retval(42)
+__naked void to_stack_check_high_2(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r0 = 42; \
+ *(u8*)(r1 - 1) = r0; \
+ r0 = *(u8*)(r1 - 1); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK check high 3")
+__success __failure_unpriv
+__msg_unpriv("R1 stack pointer arithmetic goes out of range")
+__retval(42)
+__naked void to_stack_check_high_3(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += 0; \
+ r0 = 42; \
+ *(u8*)(r1 - 1) = r0; \
+ r0 = *(u8*)(r1 - 1); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK check high 4")
+__failure __msg("invalid write to stack R1 off=0 size=1")
+__msg_unpriv("R1 stack pointer arithmetic goes out of range")
+__naked void to_stack_check_high_4(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += 0; \
+ r0 = 42; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = *(u8*)(r1 + 0); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK check high 5")
+__failure __msg("invalid write to stack R1")
+__msg_unpriv("R1 stack pointer arithmetic goes out of range")
+__naked void to_stack_check_high_5(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += %[__imm_0]; \
+ r0 = 42; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = *(u8*)(r1 + 0); \
+ exit; \
+" :
+ : __imm_const(__imm_0, (1 << 29) - 1)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK check high 6")
+__failure __msg("invalid write to stack")
+__msg_unpriv("R1 stack pointer arithmetic goes out of range")
+__naked void to_stack_check_high_6(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += %[__imm_0]; \
+ r0 = 42; \
+ *(u8*)(r1 + %[shrt_max]) = r0; \
+ r0 = *(u8*)(r1 + %[shrt_max]); \
+ exit; \
+" :
+ : __imm_const(__imm_0, (1 << 29) - 1),
+ __imm_const(shrt_max, SHRT_MAX)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK check high 7")
+__failure __msg("fp pointer offset")
+__msg_unpriv("R1 stack pointer arithmetic goes out of range")
+__naked void to_stack_check_high_7(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += %[__imm_0]; \
+ r1 += %[__imm_0]; \
+ r0 = 42; \
+ *(u8*)(r1 + %[shrt_max]) = r0; \
+ r0 = *(u8*)(r1 + %[shrt_max]); \
+ exit; \
+" :
+ : __imm_const(__imm_0, (1 << 29) - 1),
+ __imm_const(shrt_max, SHRT_MAX)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK check low 1")
+__success __success_unpriv __retval(42)
+__naked void to_stack_check_low_1(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -512; \
+ r0 = 42; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = *(u8*)(r1 + 0); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK check low 2")
+__success __failure_unpriv
+__msg_unpriv("R1 stack pointer arithmetic goes out of range")
+__retval(42)
+__naked void to_stack_check_low_2(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -513; \
+ r0 = 42; \
+ *(u8*)(r1 + 1) = r0; \
+ r0 = *(u8*)(r1 + 1); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK check low 3")
+__failure __msg("invalid write to stack R1 off=-513 size=1")
+__msg_unpriv("R1 stack pointer arithmetic goes out of range")
+__naked void to_stack_check_low_3(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -513; \
+ r0 = 42; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = *(u8*)(r1 + 0); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK check low 4")
+__failure __msg("math between fp pointer")
+__failure_unpriv
+__naked void to_stack_check_low_4(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += %[int_min]; \
+ r0 = 42; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = *(u8*)(r1 + 0); \
+ exit; \
+" :
+ : __imm_const(int_min, INT_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK check low 5")
+__failure __msg("invalid write to stack")
+__msg_unpriv("R1 stack pointer arithmetic goes out of range")
+__naked void to_stack_check_low_5(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += %[__imm_0]; \
+ r0 = 42; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = *(u8*)(r1 + 0); \
+ exit; \
+" :
+ : __imm_const(__imm_0, -((1 << 29) - 1))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK check low 6")
+__failure __msg("invalid write to stack")
+__msg_unpriv("R1 stack pointer arithmetic goes out of range")
+__naked void to_stack_check_low_6(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += %[__imm_0]; \
+ r0 = 42; \
+ *(u8*)(r1 %[shrt_min]) = r0; \
+ r0 = *(u8*)(r1 %[shrt_min]); \
+ exit; \
+" :
+ : __imm_const(__imm_0, -((1 << 29) - 1)),
+ __imm_const(shrt_min, SHRT_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK check low 7")
+__failure __msg("fp pointer offset")
+__msg_unpriv("R1 stack pointer arithmetic goes out of range")
+__naked void to_stack_check_low_7(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += %[__imm_0]; \
+ r1 += %[__imm_0]; \
+ r0 = 42; \
+ *(u8*)(r1 %[shrt_min]) = r0; \
+ r0 = *(u8*)(r1 %[shrt_min]); \
+ exit; \
+" :
+ : __imm_const(__imm_0, -((1 << 29) - 1)),
+ __imm_const(shrt_min, SHRT_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK mixed reg/k, 1")
+__success __success_unpriv __retval(42)
+__naked void stack_mixed_reg_k_1(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -3; \
+ r2 = -3; \
+ r1 += r2; \
+ r0 = 42; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = *(u8*)(r1 + 0); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK mixed reg/k, 2")
+__success __success_unpriv __retval(42)
+__naked void stack_mixed_reg_k_2(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ *(u64*)(r10 - 8) = r0; \
+ r0 = 0; \
+ *(u64*)(r10 - 16) = r0; \
+ r1 = r10; \
+ r1 += -3; \
+ r2 = -3; \
+ r1 += r2; \
+ r0 = 42; \
+ *(u8*)(r1 + 0) = r0; \
+ r5 = r10; \
+ r0 = *(u8*)(r5 - 6); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK mixed reg/k, 3")
+__success __success_unpriv __retval(-3)
+__naked void stack_mixed_reg_k_3(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -3; \
+ r2 = -3; \
+ r1 += r2; \
+ r0 = 42; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = r2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK reg")
+__success __success_unpriv __retval(42)
+__naked void ptr_to_stack_reg(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r2 = -3; \
+ r1 += r2; \
+ r0 = 42; \
+ *(u8*)(r1 + 0) = r0; \
+ r0 = *(u8*)(r1 + 0); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("stack pointer arithmetic")
+__success __success_unpriv __retval(0)
+__naked void stack_pointer_arithmetic(void)
+{
+ asm volatile (" \
+ r1 = 4; \
+ goto l0_%=; \
+l0_%=: r7 = r10; \
+ r7 += -10; \
+ r7 += -10; \
+ r2 = r7; \
+ r2 += r1; \
+ r0 = 0; \
+ *(u32*)(r2 + 4) = r0; \
+ r2 = r7; \
+ r2 += 8; \
+ r0 = 0; \
+ *(u32*)(r2 + 4) = r0; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("store PTR_TO_STACK in R10 to array map using BPF_B")
+__success __retval(42)
+__naked void array_map_using_bpf_b(void)
+{
+ asm volatile (" \
+ /* Load pointer to map. */ \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ r0 = 2; \
+ exit; \
+l0_%=: r1 = r0; \
+ /* Copy R10 to R9. */ \
+ r9 = r10; \
+ /* Pollute other registers with unaligned values. */\
+ r2 = -1; \
+ r3 = -1; \
+ r4 = -1; \
+ r5 = -1; \
+ r6 = -1; \
+ r7 = -1; \
+ r8 = -1; \
+ /* Store both R9 and R10 with BPF_B and read back. */\
+ *(u8*)(r1 + 0) = r10; \
+ r2 = *(u8*)(r1 + 0); \
+ *(u8*)(r1 + 0) = r9; \
+ r3 = *(u8*)(r1 + 0); \
+ /* Should read back as same value. */ \
+ if r2 == r3 goto l1_%=; \
+ r0 = 1; \
+ exit; \
+l1_%=: r0 = 42; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK stack size > 512")
+__failure __msg("invalid write to stack R1 off=-520 size=8")
+__naked void stack_check_size_gt_512(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -520; \
+ r0 = 42; \
+ *(u64*)(r1 + 0) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+#ifdef __BPF_FEATURE_MAY_GOTO
+SEC("socket")
+__description("PTR_TO_STACK stack size 512 with may_goto with jit")
+__load_if_JITed()
+__success __retval(42)
+__naked void stack_check_size_512_with_may_goto_jit(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -512; \
+ r0 = 42; \
+ *(u32*)(r1 + 0) = r0; \
+ may_goto l0_%=; \
+ r2 = 100; \
+ l0_%=: \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("PTR_TO_STACK stack size 512 with may_goto without jit")
+__load_if_no_JITed()
+__failure __msg("stack size 520(extra 8) is too large")
+__naked void stack_check_size_512_with_may_goto(void)
+{
+ asm volatile (" \
+ r1 = r10; \
+ r1 += -512; \
+ r0 = 42; \
+ *(u32*)(r1 + 0) = r0; \
+ may_goto l0_%=; \
+ r2 = 100; \
+ l0_%=: \
+ exit; \
+" ::: __clobber_all);
+}
+#endif
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_store_release.c b/tools/testing/selftests/bpf/progs/verifier_store_release.c
new file mode 100644
index 000000000000..72f1eb006074
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_store_release.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Google LLC. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+
+#ifdef CAN_USE_LOAD_ACQ_STORE_REL
+
+SEC("socket")
+__description("store-release, 8-bit")
+__success __success_unpriv __retval(0)
+__naked void store_release_8(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "w1 = 0x12;"
+ ".8byte %[store_release_insn];" // store_release((u8 *)(r10 - 1), w1);
+ "w2 = *(u8 *)(r10 - 1);"
+ "if r2 == r1 goto 1f;"
+ "r0 = 1;"
+"1:"
+ "exit;"
+ :
+ : __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -1))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("store-release, 16-bit")
+__success __success_unpriv __retval(0)
+__naked void store_release_16(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "w1 = 0x1234;"
+ ".8byte %[store_release_insn];" // store_release((u16 *)(r10 - 2), w1);
+ "w2 = *(u16 *)(r10 - 2);"
+ "if r2 == r1 goto 1f;"
+ "r0 = 1;"
+"1:"
+ "exit;"
+ :
+ : __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_H, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -2))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("store-release, 32-bit")
+__success __success_unpriv __retval(0)
+__naked void store_release_32(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "w1 = 0x12345678;"
+ ".8byte %[store_release_insn];" // store_release((u32 *)(r10 - 4), w1);
+ "w2 = *(u32 *)(r10 - 4);"
+ "if r2 == r1 goto 1f;"
+ "r0 = 1;"
+"1:"
+ "exit;"
+ :
+ : __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_W, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -4))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("store-release, 64-bit")
+__success __success_unpriv __retval(0)
+__naked void store_release_64(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "r1 = 0x1234567890abcdef ll;"
+ ".8byte %[store_release_insn];" // store_release((u64 *)(r10 - 8), r1);
+ "r2 = *(u64 *)(r10 - 8);"
+ "if r2 == r1 goto 1f;"
+ "r0 = 1;"
+"1:"
+ "exit;"
+ :
+ : __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -8))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("store-release with uninitialized src_reg")
+__failure __failure_unpriv __msg("R2 !read_ok")
+__naked void store_release_with_uninitialized_src_reg(void)
+{
+ asm volatile (
+ ".8byte %[store_release_insn];" // store_release((u64 *)(r10 - 8), r2);
+ "exit;"
+ :
+ : __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_2, -8))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("store-release with uninitialized dst_reg")
+__failure __failure_unpriv __msg("R2 !read_ok")
+__naked void store_release_with_uninitialized_dst_reg(void)
+{
+ asm volatile (
+ "r1 = 0;"
+ ".8byte %[store_release_insn];" // store_release((u64 *)(r2 - 8), r1);
+ "exit;"
+ :
+ : __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_2, BPF_REG_1, -8))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("store-release with non-pointer dst_reg")
+__failure __failure_unpriv __msg("R1 invalid mem access 'scalar'")
+__naked void store_release_with_non_pointer_dst_reg(void)
+{
+ asm volatile (
+ "r1 = 0;"
+ ".8byte %[store_release_insn];" // store_release((u64 *)(r1 + 0), r1);
+ "exit;"
+ :
+ : __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_1, BPF_REG_1, 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("misaligned store-release")
+__failure __failure_unpriv __msg("misaligned stack access off")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void store_release_misaligned(void)
+{
+ asm volatile (
+ "w0 = 0;"
+ ".8byte %[store_release_insn];" // store_release((u32 *)(r10 - 5), w0);
+ "exit;"
+ :
+ : __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_W, BPF_STORE_REL, BPF_REG_10, BPF_REG_0, -5))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("store-release to ctx pointer")
+__failure __failure_unpriv __msg("BPF_ATOMIC stores into R1 ctx is not allowed")
+__naked void store_release_to_ctx_pointer(void)
+{
+ asm volatile (
+ "w0 = 0;"
+ // store_release((u8 *)(r1 + offsetof(struct __sk_buff, cb[0])), w0);
+ ".8byte %[store_release_insn];"
+ "exit;"
+ :
+ : __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("store-release to pkt pointer")
+__failure __msg("BPF_ATOMIC stores into R2 pkt is not allowed")
+__naked void store_release_to_pkt_pointer(void)
+{
+ asm volatile (
+ "w0 = 0;"
+ "r2 = *(u32 *)(r1 + %[xdp_md_data]);"
+ "r3 = *(u32 *)(r1 + %[xdp_md_data_end]);"
+ "r1 = r2;"
+ "r1 += 8;"
+ "if r1 >= r3 goto l0_%=;"
+ ".8byte %[store_release_insn];" // store_release((u8 *)(r2 + 0), w0);
+"l0_%=: r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
+ __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_2, BPF_REG_0, 0))
+ : __clobber_all);
+}
+
+SEC("flow_dissector")
+__description("store-release to flow_keys pointer")
+__failure __msg("BPF_ATOMIC stores into R2 flow_keys is not allowed")
+__naked void store_release_to_flow_keys_pointer(void)
+{
+ asm volatile (
+ "w0 = 0;"
+ "r2 = *(u64 *)(r1 + %[__sk_buff_flow_keys]);"
+ ".8byte %[store_release_insn];" // store_release((u8 *)(r2 + 0), w0);
+ "exit;"
+ :
+ : __imm_const(__sk_buff_flow_keys,
+ offsetof(struct __sk_buff, flow_keys)),
+ __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_2, BPF_REG_0, 0))
+ : __clobber_all);
+}
+
+SEC("sk_reuseport")
+__description("store-release to sock pointer")
+__failure __msg("R2 cannot write into sock")
+__naked void store_release_to_sock_pointer(void)
+{
+ asm volatile (
+ "w0 = 0;"
+ "r2 = *(u64 *)(r1 + %[sk_reuseport_md_sk]);"
+ ".8byte %[store_release_insn];" // store_release((u8 *)(r2 + 0), w0);
+ "exit;"
+ :
+ : __imm_const(sk_reuseport_md_sk, offsetof(struct sk_reuseport_md, sk)),
+ __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_2, BPF_REG_0, 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("store-release, leak pointer to stack")
+__success __success_unpriv __retval(0)
+__naked void store_release_leak_pointer_to_stack(void)
+{
+ asm volatile (
+ ".8byte %[store_release_insn];" // store_release((u64 *)(r10 - 8), r1);
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -8))
+ : __clobber_all);
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
+SEC("socket")
+__description("store-release, leak pointer to map")
+__success __retval(0)
+__failure_unpriv __msg_unpriv("R6 leaks addr into map")
+__naked void store_release_leak_pointer_to_map(void)
+{
+ asm volatile (
+ "r6 = r1;"
+ "r1 = %[map_hash_8b] ll;"
+ "r2 = 0;"
+ "*(u64 *)(r10 - 8) = r2;"
+ "r2 = r10;"
+ "r2 += -8;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 == 0 goto l0_%=;"
+ ".8byte %[store_release_insn];" // store_release((u64 *)(r0 + 0), r6);
+"l0_%=:"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_addr(map_hash_8b),
+ __imm(bpf_map_lookup_elem),
+ __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_0, BPF_REG_6, 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("store-release with invalid register R15")
+__failure __failure_unpriv __msg("R15 is invalid")
+__naked void store_release_with_invalid_reg(void)
+{
+ asm volatile (
+ ".8byte %[store_release_insn];" // store_release((u64 *)(r15 + 0), r1);
+ "exit;"
+ :
+ : __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, 15 /* invalid reg */, BPF_REG_1, 0))
+ : __clobber_all);
+}
+
+#else /* CAN_USE_LOAD_ACQ_STORE_REL */
+
+SEC("socket")
+__description("Clang version < 18, ENABLE_ATOMICS_TESTS not defined, and/or JIT doesn't support store-release, use a dummy test")
+__success
+int dummy_test(void)
+{
+ return 0;
+}
+
+#endif /* CAN_USE_LOAD_ACQ_STORE_REL */
+
+char _license[] SEC("license") = "GPL";
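The tests above splice a raw store-release instruction into the asm stream via ".8byte %[store_release_insn]" because the assembler mnemonic may not be available on all compilers. A minimal sketch of how that raw value is produced, assuming the BPF_ATOMIC_OP() helper from the filter.h header included above (it packs the atomic operation code into insn->imm):

	#include <linux/bpf.h>
	#include "../../../include/linux/filter.h"

	/* store_release((u8 *)(r10 - 1), w1) as a raw instruction:
	 * BPF_STX | BPF_B | BPF_ATOMIC with imm = BPF_STORE_REL, which
	 * __imm_insn() then emits as the 8-byte literal used in the asm above.
	 */
	struct bpf_insn store_release_8bit_sketch(void)
	{
		return BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -1);
	}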
diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
new file mode 100644
index 000000000000..9d415f7ce599
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
@@ -0,0 +1,796 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <errno.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include <../../../tools/include/linux/filter.h>
+
+int vals[] SEC(".data.vals") = {1, 2, 3, 4};
+
+__naked __noinline __used
+static unsigned long identity_subprog()
+{
+ /* the simplest *static* 64-bit identity function */
+ asm volatile (
+ "r0 = r1;"
+ "exit;"
+ );
+}
+
+__noinline __used
+unsigned long global_identity_subprog(__u64 x)
+{
+ /* the simplest *global* 64-bit identity function */
+ return x;
+}
+
+__naked __noinline __used
+static unsigned long callback_subprog()
+{
+ /* the simplest callback function */
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("7: (0f) r1 += r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4")
+__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
+__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1")
+__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5")
+__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
+__naked int subprog_result_precise(void)
+{
+ asm volatile (
+ "r6 = 3;"
+ /* pass r6 through r1 into subprog to get it back as r0;
+ * this whole chain will have to be marked as precise later
+ */
+ "r1 = r6;"
+ "call identity_subprog;"
+ /* now use subprog's returned value (which is a
+ * r6 -> r1 -> r0 chain), as index into vals array, forcing
+ * all of that to be known precisely
+ */
+ "r0 *= 4;"
+ "r1 = %[vals];"
+ /* here r0->r1->r6 chain is forced to be precise and has to be
+ * propagated back to the beginning, including through the
+ * subprog call
+ */
+ "r1 += r0;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+__naked __noinline __used
+static unsigned long fp_leaking_subprog()
+{
+ asm volatile (
+ ".8byte %[r0_eq_r10_cast_s8];"
+ "exit;"
+ :: __imm_insn(r0_eq_r10_cast_s8, BPF_MOVSX64_REG(BPF_REG_0, BPF_REG_10, 8))
+ );
+}
+
+__naked __noinline __used
+static unsigned long sneaky_fp_leaking_subprog()
+{
+ asm volatile (
+ "r1 = r10;"
+ ".8byte %[r0_eq_r1_cast_s8];"
+ "exit;"
+ :: __imm_insn(r0_eq_r1_cast_s8, BPF_MOVSX64_REG(BPF_REG_0, BPF_REG_1, 8))
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("6: (0f) r1 += r0")
+__msg("mark_precise: frame0: last_idx 6 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (27) r0 *= 4")
+__msg("mark_precise: frame0: regs=r0 stack= before 3: (57) r0 &= 3")
+__msg("mark_precise: frame0: regs=r0 stack= before 10: (95) exit")
+__msg("mark_precise: frame1: regs=r0 stack= before 9: (bf) r0 = (s8)r10")
+__msg("7: R0_w=scalar")
+__naked int fp_precise_subprog_result(void)
+{
+ asm volatile (
+ "call fp_leaking_subprog;"
+ /* use subprog's returned value (which is derived from r10=fp
+ * register), as index into vals array, forcing all of that to
+ * be known precisely
+ */
+ "r0 &= 3;"
+ "r0 *= 4;"
+ "r1 = %[vals];"
+ /* force precision marking */
+ "r1 += r0;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("6: (0f) r1 += r0")
+__msg("mark_precise: frame0: last_idx 6 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (27) r0 *= 4")
+__msg("mark_precise: frame0: regs=r0 stack= before 3: (57) r0 &= 3")
+__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
+__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = (s8)r1")
+/* here r1 is marked precise, even though it's an fp register; that's fine
+ * because by the time we get out of the subprogram it has to be derived from
+ * r10 anyway, at which point we'll break the precision chain
+ */
+__msg("mark_precise: frame1: regs=r1 stack= before 9: (bf) r1 = r10")
+__msg("7: R0_w=scalar")
+__naked int sneaky_fp_precise_subprog_result(void)
+{
+ asm volatile (
+ "call sneaky_fp_leaking_subprog;"
+ /* use subprog's returned value (which is derived from r10=fp
+ * register), as index into vals array, forcing all of that to
+ * be known precisely
+ */
+ "r0 &= 3;"
+ "r0 *= 4;"
+ "r1 = %[vals];"
+ /* force precision marking */
+ "r1 += r0;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("9: (0f) r1 += r0")
+__msg("mark_precise: frame0: last_idx 9 first_idx 0")
+__msg("mark_precise: frame0: regs=r0 stack= before 8: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r0 stack= before 7: (27) r0 *= 4")
+__msg("mark_precise: frame0: regs=r0 stack= before 5: (a5) if r0 < 0x4 goto pc+1")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (85) call pc+7")
+__naked int global_subprog_result_precise(void)
+{
+ asm volatile (
+ "r6 = 3;"
+ /* pass r6 through r1 into subprog to get it back as r0;
+ * given global_identity_subprog is global, precision won't
+ * propagate all the way back to r6
+ */
+ "r1 = r6;"
+ "call global_identity_subprog;"
+ /* now use subprog's returned value (which is unknown now, so
+ * we need to clamp it), as index into vals array, forcing r0
+ * to be marked precise (with no effect on r6, though)
+ */
+ "if r0 < %[vals_arr_sz] goto 1f;"
+ "r0 = %[vals_arr_sz] - 1;"
+ "1:"
+ "r0 *= 4;"
+ "r1 = %[vals];"
+ /* here r0 is forced to be precise and has to be
+ * propagated back to the global subprog call, but it
+ * shouldn't go all the way to mark r6 as precise
+ */
+ "r1 += r0;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals),
+ __imm_const(vals_arr_sz, ARRAY_SIZE(vals))
+ : __clobber_common, "r6"
+ );
+}
+
+__naked __noinline __used
+static unsigned long loop_callback_bad()
+{
+ /* bpf_loop() callback that can return values outside of [0, 1] range */
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 s> 1000 goto 1f;"
+ "r0 = 0;"
+ "1:"
+ "goto +0;" /* checkpoint */
+ /* bpf_loop() expects [0, 1] values, so branch above skipping
+ * r0 = 0; should lead to a failure, but if exit instruction
+ * doesn't enforce r0's precision, this callback will be
+ * successfully verified
+ */
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_common
+ );
+}
+
+SEC("?raw_tp")
+__failure __log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+/* check that fallthrough code path marks r0 as precise */
+__msg("mark_precise: frame1: regs=r0 stack= before 11: (b7) r0 = 0")
+/* check that we have branch code path doing its own validation */
+__msg("from 10 to 12: frame1: R0=scalar(smin=umin=1001")
+/* check that branch code path marks r0 as precise, before failing */
+__msg("mark_precise: frame1: regs=r0 stack= before 9: (85) call bpf_get_prandom_u32#7")
+__msg("At callback return the register R0 has smin=1001 should have been in [0, 1]")
+__naked int callback_precise_return_fail(void)
+{
+ asm volatile (
+ "r1 = 1;" /* nr_loops */
+ "r2 = %[loop_callback_bad];" /* callback_fn */
+ "r3 = 0;" /* callback_ctx */
+ "r4 = 0;" /* flags */
+ "call %[bpf_loop];"
+
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_ptr(loop_callback_bad),
+ __imm(bpf_loop)
+ : __clobber_common
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+/* First simulated path does not include callback body,
+ * r1 and r4 are always precise for bpf_loop() calls.
+ */
+__msg("9: (85) call bpf_loop#181")
+__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
+__msg("mark_precise: frame0: parent state regs=r4 stack=:")
+__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
+__msg("mark_precise: frame0: regs=r4 stack= before 8: (b7) r4 = 0")
+__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
+__msg("mark_precise: frame0: parent state regs=r1 stack=:")
+__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
+__msg("mark_precise: frame0: regs=r1 stack= before 8: (b7) r4 = 0")
+__msg("mark_precise: frame0: regs=r1 stack= before 7: (b7) r3 = 0")
+__msg("mark_precise: frame0: regs=r1 stack= before 6: (bf) r2 = r8")
+__msg("mark_precise: frame0: regs=r1 stack= before 5: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
+/* r6 precision propagation */
+__msg("14: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 14 first_idx 9")
+__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
+__msg("mark_precise: frame0: regs=r0,r6 stack= before 10: (bf) r6 = r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop")
+/* State entering callback body popped from states stack */
+__msg("from 9 to 17: frame1:")
+__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
+__msg("17: (b7) r0 = 0")
+__msg("18: (95) exit")
+__msg("returning from callee:")
+__msg("to caller at 9:")
+__msg("frame 0: propagating r1,r4")
+__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r1,r4 stack= before 18: (95) exit")
+__msg("from 18 to 9: safe")
+__naked int callback_result_precise(void)
+{
+ asm volatile (
+ "r6 = 3;"
+
+ /* call subprog and use result; r0 shouldn't propagate back to
+ * callback_subprog
+ */
+ "r1 = r6;" /* nr_loops */
+ "r2 = %[callback_subprog];" /* callback_fn */
+ "r3 = 0;" /* callback_ctx */
+ "r4 = 0;" /* flags */
+ "call %[bpf_loop];"
+
+ "r6 = r0;"
+ "if r6 > 3 goto 1f;"
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the bpf_loop() call, but not beyond
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "1:"
+ "exit;"
+ :
+ : __imm_ptr(vals),
+ __imm_ptr(callback_subprog),
+ __imm(bpf_loop)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("7: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 7 first_idx 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1")
+__msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5")
+__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
+__naked int parent_callee_saved_reg_precise(void)
+{
+ asm volatile (
+ "r6 = 3;"
+
+ /* call subprog and ignore result; we need this call only to
+ * complicate jump history
+ */
+ "r1 = 0;"
+ "call identity_subprog;"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) subprog call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("7: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 7 first_idx 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 4: (85) call pc+5")
+__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
+__naked int parent_callee_saved_reg_precise_global(void)
+{
+ asm volatile (
+ "r6 = 3;"
+
+ /* call subprog and ignore result; we need this call only to
+ * complicate jump history
+ */
+ "r1 = 0;"
+ "call global_identity_subprog;"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) subprog call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+/* First simulated path does not include callback body */
+__msg("12: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 12 first_idx 9")
+__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 9: (85) call bpf_loop")
+__msg("mark_precise: frame0: parent state regs=r6 stack=:")
+__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
+__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
+__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
+__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
+/* State entering callback body popped from states stack */
+__msg("from 9 to 15: frame1:")
+__msg("15: frame1: R1=scalar() R2=0 R10=fp0 cb")
+__msg("15: (b7) r0 = 0")
+__msg("16: (95) exit")
+__msg("returning from callee:")
+__msg("to caller at 9:")
+/* r1, r4 are always precise for bpf_loop(),
+ * r6 was marked before backtracking to callback body.
+ */
+__msg("frame 0: propagating r1,r4,r6")
+__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r1,r4,r6 stack= before 16: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
+__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop")
+__msg("mark_precise: frame0: parent state regs= stack=:")
+__msg("from 16 to 9: safe")
+__naked int parent_callee_saved_reg_precise_with_callback(void)
+{
+ asm volatile (
+ "r6 = 3;"
+
+ /* call subprog and ignore result; we need this call only to
+ * complicate jump history
+ */
+ "r1 = 1;" /* nr_loops */
+ "r2 = %[callback_subprog];" /* callback_fn */
+ "r3 = 0;" /* callback_ctx */
+ "r4 = 0;" /* flags */
+ "call %[bpf_loop];"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) callback call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals),
+ __imm_ptr(callback_subprog),
+ __imm(bpf_loop)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("9: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 9 first_idx 6")
+__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: parent state regs= stack=-8:")
+__msg("mark_precise: frame0: last_idx 13 first_idx 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 13: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 12: (bf) r0 = r1")
+__msg("mark_precise: frame1: regs= stack= before 5: (85) call pc+6")
+__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
+__naked int parent_stack_slot_precise(void)
+{
+ asm volatile (
+ /* spill reg */
+ "r6 = 3;"
+ "*(u64 *)(r10 - 8) = r6;"
+
+ /* call subprog and ignore result; we need this call only to
+ * complicate jump history
+ */
+ "r1 = 0;"
+ "call identity_subprog;"
+
+ /* restore reg from stack; in this case we'll be carrying
+ * stack mask when going back into subprog through jump
+ * history
+ */
+ "r6 = *(u64 *)(r10 - 8);"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) subprog call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("9: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 9 first_idx 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: regs= stack=-8 before 5: (85) call pc+6")
+__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
+__naked int parent_stack_slot_precise_global(void)
+{
+ asm volatile (
+ /* spill reg */
+ "r6 = 3;"
+ "*(u64 *)(r10 - 8) = r6;"
+
+ /* call subprog and ignore result; we need this call only to
+ * complicate jump history
+ */
+ "r1 = 0;"
+ "call global_identity_subprog;"
+
+ /* restore reg from stack; in this case we'll be carrying
+ * stack mask when going back into subprog through jump
+ * history
+ */
+ "r6 = *(u64 *)(r10 - 8);"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) subprog call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+/* First simulated path does not include callback body */
+__msg("14: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 14 first_idx 10")
+__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: regs= stack=-8 before 10: (85) call bpf_loop")
+__msg("mark_precise: frame0: parent state regs= stack=-8:")
+__msg("mark_precise: frame0: last_idx 9 first_idx 0 subseq_idx 10")
+__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
+__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
+/* State entering callback body popped from states stack */
+__msg("from 10 to 17: frame1:")
+__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
+__msg("17: (b7) r0 = 0")
+__msg("18: (95) exit")
+__msg("returning from callee:")
+__msg("to caller at 10:")
+/* r1, r4 are always precise for bpf_loop(),
+ * fp-8 was marked before backtracking to callback body.
+ */
+__msg("frame 0: propagating r1,r4,fp-8")
+__msg("mark_precise: frame0: last_idx 10 first_idx 10 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r1,r4 stack=-8 before 18: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
+__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
+__msg("mark_precise: frame0: parent state regs= stack=:")
+__msg("from 18 to 10: safe")
+__naked int parent_stack_slot_precise_with_callback(void)
+{
+ asm volatile (
+ /* spill reg */
+ "r6 = 3;"
+ "*(u64 *)(r10 - 8) = r6;"
+
+ /* ensure we have callback frame in jump history */
+ "r1 = r6;" /* nr_loops */
+ "r2 = %[callback_subprog];" /* callback_fn */
+ "r3 = 0;" /* callback_ctx */
+ "r4 = 0;" /* flags */
+ "call %[bpf_loop];"
+
+ /* restore reg from stack; in this case we'll be carrying
+ * stack mask when going back into subprog through jump
+ * history
+ */
+ "r6 = *(u64 *)(r10 - 8);"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) subprog call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals),
+ __imm_ptr(callback_subprog),
+ __imm(bpf_loop)
+ : __clobber_common, "r6"
+ );
+}
+
+__noinline __used
+static __u64 subprog_with_precise_arg(__u64 x)
+{
+ return vals[x]; /* x is forced to be precise */
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("8: (0f) r2 += r1")
+__msg("mark_precise: frame1: last_idx 8 first_idx 0")
+__msg("mark_precise: frame1: regs=r1 stack= before 6: (18) r2 = ")
+__msg("mark_precise: frame1: regs=r1 stack= before 5: (67) r1 <<= 2")
+__msg("mark_precise: frame1: regs=r1 stack= before 2: (85) call pc+2")
+__msg("mark_precise: frame0: regs=r1 stack= before 1: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 0: (b7) r6 = 3")
+__naked int subprog_arg_precise(void)
+{
+ asm volatile (
+ "r6 = 3;"
+ "r1 = r6;"
+ /* subprog_with_precise_arg expects its argument to be
+ * precise, so r1->r6 will be marked precise from inside the
+ * subprog
+ */
+ "call subprog_with_precise_arg;"
+ "r0 += r6;"
+ "exit;"
+ :
+ :
+ : __clobber_common, "r6"
+ );
+}
+
+/* r1 is a pointer to a stack slot;
+ * r2 is a register to spill into that slot;
+ * the subprog also spills r2 into its own stack slot
+ */
+__naked __noinline __used
+static __u64 subprog_spill_reg_precise(void)
+{
+ asm volatile (
+ /* spill to parent stack */
+ "*(u64 *)(r1 + 0) = r2;"
+ /* spill to subprog stack (we use -16 offset to avoid
+ * accidental confusion with parent's -8 stack slot in
+ * verifier log output)
+ */
+ "*(u64 *)(r10 - 16) = r2;"
+	/* use both spills as return result to propagate precision everywhere */
+ "r0 = *(u64 *)(r10 - 16);"
+ "r2 = *(u64 *)(r1 + 0);"
+ "r0 += r2;"
+ "exit;"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("10: (0f) r1 += r7")
+__msg("mark_precise: frame0: last_idx 10 first_idx 7 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r7 stack= before 9: (bf) r1 = r8")
+__msg("mark_precise: frame0: regs=r7 stack= before 8: (27) r7 *= 4")
+__msg("mark_precise: frame0: regs=r7 stack= before 7: (79) r7 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=2 R6_w=1 R8_rw=map_value(map=.data.vals,ks=4,vs=16) R10=fp0 fp-8_rw=P1")
+__msg("mark_precise: frame0: last_idx 18 first_idx 0 subseq_idx 7")
+__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 17: (0f) r0 += r2")
+__msg("mark_precise: frame1: regs= stack= before 16: (79) r2 = *(u64 *)(r1 +0)")
+__msg("mark_precise: frame1: regs= stack= before 15: (79) r0 = *(u64 *)(r10 -16)")
+__msg("mark_precise: frame1: regs= stack= before 14: (7b) *(u64 *)(r10 -16) = r2")
+__msg("mark_precise: frame1: regs= stack= before 13: (7b) *(u64 *)(r1 +0) = r2")
+__msg("mark_precise: frame1: regs=r2 stack= before 6: (85) call pc+6")
+__msg("mark_precise: frame0: regs=r2 stack= before 5: (bf) r2 = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 4: (07) r1 += -8")
+__msg("mark_precise: frame0: regs=r6 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 1")
+__naked int subprog_spill_into_parent_stack_slot_precise(void)
+{
+ asm volatile (
+ "r6 = 1;"
+
+ /* pass pointer to stack slot and r6 to subprog;
+ * r6 will be marked precise and spilled into fp-8 slot, which
+ * also should be marked precise
+ */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = r6;"
+ "call subprog_spill_reg_precise;"
+
+ /* restore reg from stack; in this case we'll be carrying
+ * stack mask when going back into subprog through jump
+ * history
+ */
+ "r7 = *(u64 *)(r10 - 8);"
+
+ "r7 *= 4;"
+ "r1 = %[vals];"
+ /* here r7 is forced to be precise and has to be propagated
+ * back to the beginning, handling subprog call and logic
+ */
+ "r1 += r7;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6", "r7"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("17: (0f) r1 += r0")
+__msg("mark_precise: frame0: last_idx 17 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r0 stack= before 16: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r0 stack= before 15: (27) r0 *= 4")
+__msg("mark_precise: frame0: regs=r0 stack= before 14: (79) r0 = *(u64 *)(r10 -16)")
+__msg("mark_precise: frame0: regs= stack=-16 before 13: (7b) *(u64 *)(r7 -8) = r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 12: (79) r0 = *(u64 *)(r8 +16)")
+__msg("mark_precise: frame0: regs= stack=-16 before 11: (7b) *(u64 *)(r8 +16) = r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 10: (79) r0 = *(u64 *)(r7 -8)")
+__msg("mark_precise: frame0: regs= stack=-16 before 9: (7b) *(u64 *)(r10 -16) = r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 8: (07) r8 += -32")
+__msg("mark_precise: frame0: regs=r0 stack= before 7: (bf) r8 = r10")
+__msg("mark_precise: frame0: regs=r0 stack= before 6: (07) r7 += -8")
+__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r7 = r10")
+__msg("mark_precise: frame0: regs=r0 stack= before 21: (95) exit")
+__msg("mark_precise: frame1: regs=r0 stack= before 20: (bf) r0 = r1")
+__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+15")
+__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 1")
+__naked int stack_slot_aliases_precision(void)
+{
+ asm volatile (
+ "r6 = 1;"
+ /* pass r6 through r1 into subprog to get it back as r0;
+ * this whole chain will have to be marked as precise later
+ */
+ "r1 = r6;"
+ "call identity_subprog;"
+ /* let's setup two registers that are aliased to r10 */
+ "r7 = r10;"
+ "r7 += -8;" /* r7 = r10 - 8 */
+ "r8 = r10;"
+ "r8 += -32;" /* r8 = r10 - 32 */
+ /* now spill subprog's return value (a r6 -> r1 -> r0 chain)
+ * a few times through different stack pointer regs, making
+ * sure to use r10, r7, and r8 both in LDX and STX insns, and
+ * *importantly* also using a combination of const var_off and
+ * insn->off to validate that we record final stack slot
+ * correctly, instead of relying on just insn->off derivation,
+ * which is only valid for r10-based stack offset
+ */
+ "*(u64 *)(r10 - 16) = r0;"
+ "r0 = *(u64 *)(r7 - 8);" /* r7 - 8 == r10 - 16 */
+ "*(u64 *)(r8 + 16) = r0;" /* r8 + 16 = r10 - 16 */
+ "r0 = *(u64 *)(r8 + 16);"
+ "*(u64 *)(r7 - 8) = r0;"
+ "r0 = *(u64 *)(r10 - 16);"
+ /* get ready to use r0 as an index into array to force precision */
+ "r0 *= 4;"
+ "r1 = %[vals];"
+ /* here r0->r1->r6 chain is forced to be precise and has to be
+ * propagated back to the beginning, including through the
+ * subprog call and all the stack spills and loads
+ */
+ "r1 += r0;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+char _license[] SEC("license") = "GPL";
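As a rough C-level analogue of what the naked tests above exercise (illustrative only, not part of the test suite; it assumes ARRAY_SIZE() is available from bpf_misc.h): when a value returned from a subprog is used as an array index, the verifier must mark the whole chain feeding that value precise in order to prove the access stays inside vals[].

	static __noinline __u64 identity_sketch(__u64 x)
	{
		return x;	/* plays the role of identity_subprog above */
	}

	SEC("?raw_tp")
	int precise_index_sketch(void *ctx)
	{
		__u64 i = identity_sketch(3);	/* constant flows through the subprog */

		if (i >= ARRAY_SIZE(vals))	/* clamp, as the global-subprog test does */
			return 0;
		return vals[i];			/* indexing forces precision on i */
	}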
diff --git a/tools/testing/selftests/bpf/progs/verifier_subreg.c b/tools/testing/selftests/bpf/progs/verifier_subreg.c
new file mode 100644
index 000000000000..8613ea160dcd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_subreg.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/subreg.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+/* This file contains sub-register zero extension checks for insns defining
+ * sub-registers, meaning:
+ * - All insns under BPF_ALU class. Their BPF_ALU32 variants or narrow width
+ * forms (BPF_END) could define sub-registers.
+ * - Narrow direct loads, BPF_B/H/W | BPF_LDX.
+ * - BPF_LD is not exposed to JIT back-ends, so no need for testing.
+ *
+ * "get_prandom_u32" is used to initialize low 32-bit of some registers to
+ * prevent potential optimizations done by verifier or JIT back-ends which could
+ * optimize register back into constant when range info shows one register is a
+ * constant.
+ */
+
+SEC("socket")
+__description("add32 reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void add32_reg_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = r0; \
+ r0 = 0x100000000 ll; \
+ w0 += w1; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("add32 imm zero extend check")
+__success __success_unpriv __retval(0)
+__naked void add32_imm_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+	/* An insn could have no effect on the low 32 bits, for example:\
+ * a = a + 0 \
+ * a = a | 0 \
+ * a = a & -1 \
+	 * But they should still zero the high 32 bits.		\
+ */ \
+ w0 += 0; \
+ r0 >>= 32; \
+ r6 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 += -2; \
+ r0 >>= 32; \
+ r0 |= r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("sub32 reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void sub32_reg_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = r0; \
+ r0 = 0x1ffffffff ll; \
+ w0 -= w1; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("sub32 imm zero extend check")
+__success __success_unpriv __retval(0)
+__naked void sub32_imm_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 -= 0; \
+ r0 >>= 32; \
+ r6 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 -= 1; \
+ r0 >>= 32; \
+ r0 |= r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("mul32 reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void mul32_reg_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = r0; \
+ r0 = 0x100000001 ll; \
+ w0 *= w1; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("mul32 imm zero extend check")
+__success __success_unpriv __retval(0)
+__naked void mul32_imm_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 *= 1; \
+ r0 >>= 32; \
+ r6 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 *= -1; \
+ r0 >>= 32; \
+ r0 |= r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("div32 reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void div32_reg_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = r0; \
+ r0 = -1; \
+ w0 /= w1; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("div32 imm zero extend check")
+__success __success_unpriv __retval(0)
+__naked void div32_imm_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 /= 1; \
+ r0 >>= 32; \
+ r6 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 /= 2; \
+ r0 >>= 32; \
+ r0 |= r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("or32 reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void or32_reg_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = r0; \
+ r0 = 0x100000001 ll; \
+ w0 |= w1; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("or32 imm zero extend check")
+__success __success_unpriv __retval(0)
+__naked void or32_imm_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 |= 0; \
+ r0 >>= 32; \
+ r6 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 |= 1; \
+ r0 >>= 32; \
+ r0 |= r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("and32 reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void and32_reg_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x100000000 ll; \
+ r1 |= r0; \
+ r0 = 0x1ffffffff ll; \
+ w0 &= w1; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("and32 imm zero extend check")
+__success __success_unpriv __retval(0)
+__naked void and32_imm_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 &= -1; \
+ r0 >>= 32; \
+ r6 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 &= -2; \
+ r0 >>= 32; \
+ r0 |= r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("lsh32 reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void lsh32_reg_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x100000000 ll; \
+ r0 |= r1; \
+ r1 = 1; \
+ w0 <<= w1; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("lsh32 imm zero extend check")
+__success __success_unpriv __retval(0)
+__naked void lsh32_imm_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 <<= 0; \
+ r0 >>= 32; \
+ r6 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 <<= 1; \
+ r0 >>= 32; \
+ r0 |= r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("rsh32 reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void rsh32_reg_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ r1 = 1; \
+ w0 >>= w1; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("rsh32 imm zero extend check")
+__success __success_unpriv __retval(0)
+__naked void rsh32_imm_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 >>= 0; \
+ r0 >>= 32; \
+ r6 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 >>= 1; \
+ r0 >>= 32; \
+ r0 |= r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("neg32 reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void neg32_reg_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 = -w0; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("mod32 reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void mod32_reg_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = r0; \
+ r0 = -1; \
+ w0 %%= w1; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("mod32 imm zero extend check")
+__success __success_unpriv __retval(0)
+__naked void mod32_imm_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 %%= 1; \
+ r0 >>= 32; \
+ r6 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 %%= 2; \
+ r0 >>= 32; \
+ r0 |= r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("xor32 reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void xor32_reg_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = r0; \
+ r0 = 0x100000000 ll; \
+ w0 ^= w1; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("xor32 imm zero extend check")
+__success __success_unpriv __retval(0)
+__naked void xor32_imm_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 ^= 1; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("mov32 reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void mov32_reg_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x100000000 ll; \
+ r1 |= r0; \
+ r0 = 0x100000000 ll; \
+ w0 = w1; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("mov32 imm zero extend check")
+__success __success_unpriv __retval(0)
+__naked void mov32_imm_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 = 0; \
+ r0 >>= 32; \
+ r6 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 = 1; \
+ r0 >>= 32; \
+ r0 |= r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("arsh32 reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void arsh32_reg_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ r1 = 1; \
+ w0 s>>= w1; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("arsh32 imm zero extend check")
+__success __success_unpriv __retval(0)
+__naked void arsh32_imm_zero_extend_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 s>>= 0; \
+ r0 >>= 32; \
+ r6 = r0; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w0 s>>= 1; \
+ r0 >>= 32; \
+ r0 |= r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("end16 (to_le) reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void le_reg_zero_extend_check_1(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r6 = r0; \
+ r6 <<= 32; \
+ call %[bpf_get_prandom_u32]; \
+ r0 |= r6; \
+ r0 = le16 r0; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("end32 (to_le) reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void le_reg_zero_extend_check_2(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r6 = r0; \
+ r6 <<= 32; \
+ call %[bpf_get_prandom_u32]; \
+ r0 |= r6; \
+ r0 = le32 r0; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("end16 (to_be) reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void be_reg_zero_extend_check_1(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r6 = r0; \
+ r6 <<= 32; \
+ call %[bpf_get_prandom_u32]; \
+ r0 |= r6; \
+ r0 = be16 r0; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("end32 (to_be) reg zero extend check")
+__success __success_unpriv __retval(0)
+__naked void be_reg_zero_extend_check_2(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r6 = r0; \
+ r6 <<= 32; \
+ call %[bpf_get_prandom_u32]; \
+ r0 |= r6; \
+ r0 = be32 r0; \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("ldx_b zero extend check")
+__success __success_unpriv __retval(0)
+__naked void ldx_b_zero_extend_check(void)
+{
+ asm volatile (" \
+ r6 = r10; \
+ r6 += -4; \
+ r7 = 0xfaceb00c; \
+ *(u32*)(r6 + 0) = r7; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ r0 = *(u8*)(r6 + 0); \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("ldx_h zero extend check")
+__success __success_unpriv __retval(0)
+__naked void ldx_h_zero_extend_check(void)
+{
+ asm volatile (" \
+ r6 = r10; \
+ r6 += -4; \
+ r7 = 0xfaceb00c; \
+ *(u32*)(r6 + 0) = r7; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ r0 = *(u16*)(r6 + 0); \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("ldx_w zero extend check")
+__success __success_unpriv __retval(0)
+__naked void ldx_w_zero_extend_check(void)
+{
+ asm volatile (" \
+ r6 = r10; \
+ r6 += -4; \
+ r7 = 0xfaceb00c; \
+ *(u32*)(r6 + 0) = r7; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ r0 = *(u32*)(r6 + 0); \
+ r0 >>= 32; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
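Every test in this file follows the same skeleton; spelled out once as an annotated sketch (not an additional test):

	/*
	 *	call %[bpf_get_prandom_u32];	// unknown value in the low 32 bits of r0
	 *	r1 = 0x1000000000 ll;
	 *	r0 |= r1;			// pollute the bits above 32
	 *	w0 <op>= ...;			// 32-bit ALU/load op under test defines w0
	 *	r0 >>= 32;			// keep only the upper half
	 *	exit;				// retval 0 means the op zero-extended r0
	 */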
diff --git a/tools/testing/selftests/bpf/progs/verifier_tailcall.c b/tools/testing/selftests/bpf/progs/verifier_tailcall.c
new file mode 100644
index 000000000000..b4acce60fb9b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_tailcall.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} map_array SEC(".maps");
+
+SEC("socket")
+__description("invalid map type for tail call")
+__failure __msg("expected prog array map for tail call")
+__failure_unpriv
+__naked void invalid_map_for_tail_call(void)
+{
+ asm volatile (" \
+ r2 = %[map_array] ll; \
+ r3 = 0; \
+ call %[bpf_tail_call]; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_array)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
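For contrast, a sketch of the setup the error message asks for: tail calls require a BPF_MAP_TYPE_PROG_ARRAY rather than a regular array map. The names below are made up for illustration; only the headers already included above are assumed.

	struct {
		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
		__uint(max_entries, 1);
		__uint(key_size, sizeof(__u32));
		__array(values, void (void));
	} prog_array_sketch SEC(".maps");

	SEC("socket")
	int valid_tail_call_sketch(struct __sk_buff *skb)
	{
		bpf_tail_call(skb, &prog_array_sketch, 0);	/* falls through if slot 0 is empty */
		return 0;
	}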
diff --git a/tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c b/tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c
new file mode 100644
index 000000000000..8d60c634a114
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+int main(void);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __array(values, void (void));
+} jmp_table SEC(".maps") = {
+ .values = {
+ [0] = (void *) &main,
+ },
+};
+
+__noinline __auxiliary
+static __naked int sub(void)
+{
+ asm volatile (
+ "r2 = %[jmp_table] ll;"
+ "r3 = 0;"
+ "call 12;"
+ "exit;"
+ :
+ : __imm_addr(jmp_table)
+ : __clobber_all);
+}
+
+__success
+__arch_x86_64
+/* program entry for main(), regular function prologue */
+__jited(" endbr64")
+__jited(" nopl (%rax,%rax)")
+__jited(" xorq %rax, %rax")
+__jited(" pushq %rbp")
+__jited(" movq %rsp, %rbp")
+/* tail call prologue for program:
+ * - establish memory location for tail call counter at &rbp[-8];
+ * - spill tail_call_cnt_ptr at &rbp[-16];
+ * - expect tail call counter to be passed in rax;
+ * - for entry program rax is a raw counter, value < 33;
+ * - for tail called program rax is tail_call_cnt_ptr (value > 33).
+ */
+__jited(" endbr64")
+__jited(" cmpq $0x21, %rax")
+__jited(" ja L0")
+__jited(" pushq %rax")
+__jited(" movq %rsp, %rax")
+__jited(" jmp L1")
+__jited("L0: pushq %rax") /* rbp[-8] = rax */
+__jited("L1: pushq %rax") /* rbp[-16] = rax */
+/* on subprogram call, restore rax to be tail_call_cnt_ptr from rbp[-16]
+ * (because the original rax might be clobbered by this point)
+ */
+__jited(" movq -0x10(%rbp), %rax")
+__jited(" callq 0x{{.*}}") /* call to sub() */
+__jited(" xorl %eax, %eax")
+__jited(" leave")
+__jited(" {{(retq|jmp 0x)}}") /* return or jump to rethunk */
+__jited("...")
+/* subprogram entry for sub(), regular function prologue */
+__jited(" endbr64")
+__jited(" nopl (%rax,%rax)")
+__jited(" nopl (%rax)")
+__jited(" pushq %rbp")
+__jited(" movq %rsp, %rbp")
+/* tail call prologue for subprogram: the address of the tail call
+ * counter is stored at rbp[-16].
+ */
+__jited(" endbr64")
+__jited(" pushq %rax") /* rbp[-8] = rax */
+__jited(" pushq %rax") /* rbp[-16] = rax */
+__jited(" movabsq ${{.*}}, %rsi") /* r2 = &jmp_table */
+__jited(" xorl %edx, %edx") /* r3 = 0 */
+/* bpf_tail_call implementation:
+ * - load tail_call_cnt_ptr from rbp[-16];
+ * - if *tail_call_cnt_ptr < 33, increment it and jump to target;
+ * - otherwise do nothing.
+ */
+__jited(" movq -0x10(%rbp), %rax")
+__jited(" cmpq $0x21, (%rax)")
+__jited(" jae L0")
+__jited(" nopl (%rax,%rax)")
+__jited(" addq $0x1, (%rax)") /* *tail_call_cnt_ptr += 1 */
+__jited(" popq %rax")
+__jited(" popq %rax")
+__jited(" jmp {{.*}}") /* jump to tail call tgt */
+__jited("L0: leave")
+__jited(" {{(retq|jmp 0x)}}") /* return or jump to rethunk */
+SEC("tc")
+__naked int main(void)
+{
+ asm volatile (
+ "call %[sub];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(sub)
+ : __clobber_all);
+}
+
+char __license[] SEC("license") = "GPL";
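The counter handling that the __jited() expectations above encode boils down to the following (a sketch in C, illustrative only; MAX_TAIL_CALL_CNT is 33 in the kernel, hence the 0x21 immediates):

	static void tail_call_counter_sketch(unsigned long *tail_call_cnt_ptr,
					     void (*target)(void))
	{
		if (*tail_call_cnt_ptr >= 33)
			return;			/* limit reached: fall through */
		(*tail_call_cnt_ptr)++;
		target();			/* in the JIT this is a direct jump, not a call */
	}

The entry program keeps the raw counter on its own stack and passes its address down in rax, which is why main() and sub() above end up sharing a single counter.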
diff --git a/tools/testing/selftests/bpf/progs/verifier_typedef.c b/tools/testing/selftests/bpf/progs/verifier_typedef.c
new file mode 100644
index 000000000000..08481cfaac4b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_typedef.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("fentry/bpf_fentry_test_sinfo")
+__description("typedef: resolve")
+__success __retval(0)
+__naked void resolve_typedef(void)
+{
+ asm volatile (" \
+ r1 = *(u64 *)(r1 +0); \
+ r2 = *(u64 *)(r1 +%[frags_offs]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(frags_offs,
+ offsetof(struct skb_shared_info, frags))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_uninit.c b/tools/testing/selftests/bpf/progs/verifier_uninit.c
new file mode 100644
index 000000000000..7718cd7d19ce
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_uninit.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/uninit.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("read uninitialized register")
+__failure __msg("R2 !read_ok")
+__failure_unpriv
+__naked void read_uninitialized_register(void)
+{
+ asm volatile (" \
+ r0 = r2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("read invalid register")
+__failure __msg("R15 is invalid")
+__failure_unpriv
+__naked void read_invalid_register(void)
+{
+ asm volatile (" \
+ .8byte %[mov64_reg]; \
+ exit; \
+" :
+ : __imm_insn(mov64_reg, BPF_MOV64_REG(BPF_REG_0, -1))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("program doesn't init R0 before exit")
+__failure __msg("R0 !read_ok")
+__failure_unpriv
+__naked void t_init_r0_before_exit(void)
+{
+ asm volatile (" \
+ r2 = r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("program doesn't init R0 before exit in all branches")
+__failure __msg("R0 !read_ok")
+__msg_unpriv("R1 pointer comparison")
+__naked void before_exit_in_all_branches(void)
+{
+ asm volatile (" \
+ if r1 >= 0 goto l0_%=; \
+ r0 = 1; \
+ r0 += 2; \
+l0_%=: exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
new file mode 100644
index 000000000000..28b4f7035ceb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
@@ -0,0 +1,953 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/unpriv.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+
+#define BPF_SK_LOOKUP(func) \
+ /* struct bpf_sock_tuple tuple = {} */ \
+ "r2 = 0;" \
+ "*(u32*)(r10 - 8) = r2;" \
+ "*(u64*)(r10 - 16) = r2;" \
+ "*(u64*)(r10 - 24) = r2;" \
+ "*(u64*)(r10 - 32) = r2;" \
+ "*(u64*)(r10 - 40) = r2;" \
+ "*(u64*)(r10 - 48) = r2;" \
+ /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
+ "r2 = r10;" \
+ "r2 += -48;" \
+ "r3 = %[sizeof_bpf_sock_tuple];"\
+ "r4 = 0;" \
+ "r5 = 0;" \
+ "call %[" #func "];"
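A sketch of how this macro is meant to be used inside a __naked test (illustrative; the surrounding test must supply the immediates the macro references, and the acquired socket reference has to be released):

	SEC("tc")
	__naked void sk_lookup_usage_sketch(void)
	{
		asm volatile (
		BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
	"	if r0 == 0 goto 1f;"
	"	r1 = r0;"
	"	call %[bpf_sk_release];"	/* release the acquired socket ref */
	"1:	r0 = 0;"
	"	exit;"
		:
		: __imm(bpf_sk_lookup_tcp),
		  __imm(bpf_sk_release),
		  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
		: __clobber_all);
	}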
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
+void dummy_prog_42_socket(void);
+void dummy_prog_24_socket(void);
+void dummy_prog_loop1_socket(void);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 4);
+ __uint(key_size, sizeof(int));
+ __array(values, void (void));
+} map_prog1_socket SEC(".maps") = {
+ .values = {
+ [0] = (void *)&dummy_prog_42_socket,
+ [1] = (void *)&dummy_prog_loop1_socket,
+ [2] = (void *)&dummy_prog_24_socket,
+ },
+};
+
+SEC("socket")
+__auxiliary __auxiliary_unpriv
+__naked void dummy_prog_42_socket(void)
+{
+ asm volatile ("r0 = 42; exit;");
+}
+
+SEC("socket")
+__auxiliary __auxiliary_unpriv
+__naked void dummy_prog_24_socket(void)
+{
+ asm volatile ("r0 = 24; exit;");
+}
+
+SEC("socket")
+__auxiliary __auxiliary_unpriv
+__naked void dummy_prog_loop1_socket(void)
+{
+ asm volatile (" \
+ r3 = 1; \
+ r2 = %[map_prog1_socket] ll; \
+ call %[bpf_tail_call]; \
+ r0 = 41; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: return pointer")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr")
+__retval(POINTER_VALUE)
+__naked void unpriv_return_pointer(void)
+{
+ asm volatile (" \
+ r0 = r10; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: add const to pointer")
+__success __success_unpriv __retval(0)
+__naked void unpriv_add_const_to_pointer(void)
+{
+ asm volatile (" \
+ r1 += 8; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: add pointer to pointer")
+__failure __msg("R1 pointer += pointer")
+__failure_unpriv
+__naked void unpriv_add_pointer_to_pointer(void)
+{
+ asm volatile (" \
+ r1 += r10; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: neg pointer")
+__success __failure_unpriv __msg_unpriv("R1 pointer arithmetic")
+__retval(0)
+__naked void unpriv_neg_pointer(void)
+{
+ asm volatile (" \
+ r1 = -r1; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: cmp pointer with const")
+__success __failure_unpriv __msg_unpriv("R1 pointer comparison")
+__retval(0)
+__naked void unpriv_cmp_pointer_with_const(void)
+{
+ asm volatile (" \
+ if r1 == 0 goto l0_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: cmp pointer with pointer")
+__success __failure_unpriv __msg_unpriv("R10 pointer comparison")
+__retval(0)
+__naked void unpriv_cmp_pointer_with_pointer(void)
+{
+ asm volatile (" \
+ if r1 == r10 goto l0_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tracepoint")
+__description("unpriv: check that printk is disallowed")
+__success
+__naked void check_that_printk_is_disallowed(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r1 = r10; \
+ r1 += -8; \
+ r2 = 8; \
+ r3 = r1; \
+ call %[bpf_trace_printk]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_trace_printk)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: pass pointer to helper function")
+__success __failure_unpriv __msg_unpriv("R4 leaks addr")
+__retval(0)
+__naked void pass_pointer_to_helper_function(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ r3 = r2; \
+ r4 = r2; \
+ call %[bpf_map_update_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_update_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: indirectly pass pointer on stack to helper function")
+__success __failure_unpriv
+__msg_unpriv("invalid read from stack R2 off -8+0 size 8")
+__retval(0)
+__naked void on_stack_to_helper_function(void)
+{
+ asm volatile (" \
+ *(u64*)(r10 - 8) = r10; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: mangle pointer on stack 1")
+__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
+__retval(0)
+__naked void mangle_pointer_on_stack_1(void)
+{
+ asm volatile (" \
+ *(u64*)(r10 - 8) = r10; \
+ r0 = 0; \
+ *(u32*)(r10 - 8) = r0; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: mangle pointer on stack 2")
+__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
+__retval(0)
+__naked void mangle_pointer_on_stack_2(void)
+{
+ asm volatile (" \
+ *(u64*)(r10 - 8) = r10; \
+ r0 = 0; \
+ *(u8*)(r10 - 1) = r0; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: read pointer from stack in small chunks")
+__failure __msg("invalid size")
+__failure_unpriv
+__naked void from_stack_in_small_chunks(void)
+{
+ asm volatile (" \
+ *(u64*)(r10 - 8) = r10; \
+ r0 = *(u32*)(r10 - 8); \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: write pointer into ctx")
+__failure __msg("invalid bpf_context access")
+__failure_unpriv __msg_unpriv("R1 leaks addr")
+__naked void unpriv_write_pointer_into_ctx(void)
+{
+ asm volatile (" \
+ *(u64*)(r1 + 0) = r1; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: spill/fill of ctx")
+__success __success_unpriv __retval(0)
+__naked void unpriv_spill_fill_of_ctx(void)
+{
+ asm volatile (" \
+ r6 = r10; \
+ r6 += -8; \
+ *(u64*)(r6 + 0) = r1; \
+ r1 = *(u64*)(r6 + 0); \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("unpriv: spill/fill of ctx 2")
+__success __retval(0)
+__naked void spill_fill_of_ctx_2(void)
+{
+ asm volatile (" \
+ r6 = r10; \
+ r6 += -8; \
+ *(u64*)(r6 + 0) = r1; \
+ r1 = *(u64*)(r6 + 0); \
+ call %[bpf_get_hash_recalc]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_hash_recalc)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("unpriv: spill/fill of ctx 3")
+__failure __msg("R1 type=fp expected=ctx")
+__naked void spill_fill_of_ctx_3(void)
+{
+ asm volatile (" \
+ r6 = r10; \
+ r6 += -8; \
+ *(u64*)(r6 + 0) = r1; \
+ *(u64*)(r6 + 0) = r10; \
+ r1 = *(u64*)(r6 + 0); \
+ call %[bpf_get_hash_recalc]; \
+ exit; \
+" :
+ : __imm(bpf_get_hash_recalc)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("unpriv: spill/fill of ctx 4")
+__failure __msg("R1 type=scalar expected=ctx")
+__naked void spill_fill_of_ctx_4(void)
+{
+ asm volatile (" \
+ r6 = r10; \
+ r6 += -8; \
+ *(u64*)(r6 + 0) = r1; \
+ r0 = 1; \
+ lock *(u64 *)(r10 - 8) += r0; \
+ r1 = *(u64*)(r6 + 0); \
+ call %[bpf_get_hash_recalc]; \
+ exit; \
+" :
+ : __imm(bpf_get_hash_recalc)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("unpriv: spill/fill of different pointers stx")
+__failure __msg("same insn cannot be used with different pointers")
+__naked void fill_of_different_pointers_stx(void)
+{
+ asm volatile (" \
+ r3 = 42; \
+ r6 = r10; \
+ r6 += -8; \
+ if r1 == 0 goto l0_%=; \
+ r2 = r10; \
+ r2 += -16; \
+ *(u64*)(r6 + 0) = r2; \
+l0_%=: if r1 != 0 goto l1_%=; \
+ *(u64*)(r6 + 0) = r1; \
+l1_%=: r1 = *(u64*)(r6 + 0); \
+ *(u32*)(r1 + %[__sk_buff_mark]) = r3; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
+ : __clobber_all);
+}
+
+/* Same as above, but use BPF_ST_MEM to save 42
+ * instead of BPF_STX_MEM.
+ */
+SEC("tc")
+__description("unpriv: spill/fill of different pointers st")
+__failure __msg("same insn cannot be used with different pointers")
+__naked void fill_of_different_pointers_st(void)
+{
+ asm volatile (" \
+ r6 = r10; \
+ r6 += -8; \
+ if r1 == 0 goto l0_%=; \
+ r2 = r10; \
+ r2 += -16; \
+ *(u64*)(r6 + 0) = r2; \
+l0_%=: if r1 != 0 goto l1_%=; \
+ *(u64*)(r6 + 0) = r1; \
+l1_%=: r1 = *(u64*)(r6 + 0); \
+ .8byte %[st_mem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
+ __imm_insn(st_mem,
+ BPF_ST_MEM(BPF_W, BPF_REG_1, offsetof(struct __sk_buff, mark), 42))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("unpriv: spill/fill of different pointers stx - ctx and sock")
+__failure __msg("type=ctx expected=sock")
+__naked void pointers_stx_ctx_and_sock(void)
+{
+ asm volatile (" \
+ r8 = r1; \
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */\
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r2 = r0; \
+ /* u64 foo; */ \
+ /* void *target = &foo; */ \
+ r6 = r10; \
+ r6 += -8; \
+ r1 = r8; \
+ /* if (skb == NULL) *target = sock; */ \
+ if r1 == 0 goto l0_%=; \
+ *(u64*)(r6 + 0) = r2; \
+l0_%=: /* else *target = skb; */ \
+ if r1 != 0 goto l1_%=; \
+ *(u64*)(r6 + 0) = r1; \
+l1_%=: /* struct __sk_buff *skb = *target; */ \
+ r1 = *(u64*)(r6 + 0); \
+ /* skb->mark = 42; */ \
+ r3 = 42; \
+ *(u32*)(r1 + %[__sk_buff_mark]) = r3; \
+ /* if (sk) bpf_sk_release(sk) */ \
+ if r1 == 0 goto l2_%=; \
+ call %[bpf_sk_release]; \
+l2_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("unpriv: spill/fill of different pointers stx - leak sock")
+__failure
+//.errstr = "same insn cannot be used with different pointers",
+__msg("Unreleased reference")
+__naked void different_pointers_stx_leak_sock(void)
+{
+ asm volatile (" \
+ r8 = r1; \
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */\
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r2 = r0; \
+ /* u64 foo; */ \
+ /* void *target = &foo; */ \
+ r6 = r10; \
+ r6 += -8; \
+ r1 = r8; \
+ /* if (skb == NULL) *target = sock; */ \
+ if r1 == 0 goto l0_%=; \
+ *(u64*)(r6 + 0) = r2; \
+l0_%=: /* else *target = skb; */ \
+ if r1 != 0 goto l1_%=; \
+ *(u64*)(r6 + 0) = r1; \
+l1_%=: /* struct __sk_buff *skb = *target; */ \
+ r1 = *(u64*)(r6 + 0); \
+ /* skb->mark = 42; */ \
+ r3 = 42; \
+ *(u32*)(r1 + %[__sk_buff_mark]) = r3; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("unpriv: spill/fill of different pointers stx - sock and ctx (read)")
+__failure __msg("same insn cannot be used with different pointers")
+__naked void stx_sock_and_ctx_read(void)
+{
+ asm volatile (" \
+ r8 = r1; \
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */\
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r2 = r0; \
+ /* u64 foo; */ \
+ /* void *target = &foo; */ \
+ r6 = r10; \
+ r6 += -8; \
+ r1 = r8; \
+ /* if (skb) *target = skb */ \
+ if r1 == 0 goto l0_%=; \
+ *(u64*)(r6 + 0) = r1; \
+l0_%=: /* else *target = sock */ \
+ if r1 != 0 goto l1_%=; \
+ *(u64*)(r6 + 0) = r2; \
+l1_%=: /* struct bpf_sock *sk = *target; */ \
+ r1 = *(u64*)(r6 + 0); \
+ /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */\
+ if r1 == 0 goto l2_%=; \
+ r3 = *(u32*)(r1 + %[bpf_sock_mark]); \
+ call %[bpf_sk_release]; \
+l2_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("unpriv: spill/fill of different pointers stx - sock and ctx (write)")
+__failure
+//.errstr = "same insn cannot be used with different pointers",
+__msg("cannot write into sock")
+__naked void stx_sock_and_ctx_write(void)
+{
+ asm volatile (" \
+ r8 = r1; \
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */\
+" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
+" r2 = r0; \
+ /* u64 foo; */ \
+ /* void *target = &foo; */ \
+ r6 = r10; \
+ r6 += -8; \
+ r1 = r8; \
+ /* if (skb) *target = skb */ \
+ if r1 == 0 goto l0_%=; \
+ *(u64*)(r6 + 0) = r1; \
+l0_%=: /* else *target = sock */ \
+ if r1 != 0 goto l1_%=; \
+ *(u64*)(r6 + 0) = r2; \
+l1_%=: /* struct bpf_sock *sk = *target; */ \
+ r1 = *(u64*)(r6 + 0); \
+ /* if (sk) sk->mark = 42; bpf_sk_release(sk); */\
+ if r1 == 0 goto l2_%=; \
+ r3 = 42; \
+ *(u32*)(r1 + %[bpf_sock_mark]) = r3; \
+ call %[bpf_sk_release]; \
+l2_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_sk_lookup_tcp),
+ __imm(bpf_sk_release),
+ __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
+ __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: write pointer into map elem value")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr")
+__retval(0)
+__naked void pointer_into_map_elem_value(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ *(u64*)(r0 + 0) = r0; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("alu32: mov u32 const")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if r0 == 0x0 goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R7 invalid mem access 'scalar'` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("exit")
+#endif
+__naked void alu32_mov_u32_const(void)
+{
+ asm volatile (" \
+ w7 = 0; \
+ w7 &= 1; \
+ w0 = w7; \
+ if r0 == 0 goto l0_%=; \
+ r0 = *(u64*)(r7 + 0); \
+l0_%=: exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: partial copy of pointer")
+__success __failure_unpriv __msg_unpriv("R10 partial copy")
+__retval(0)
+__naked void unpriv_partial_copy_of_pointer(void)
+{
+ asm volatile (" \
+ w1 = w10; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: pass pointer to tail_call")
+__success __failure_unpriv __msg_unpriv("R3 leaks addr into helper")
+__retval(0)
+__naked void pass_pointer_to_tail_call(void)
+{
+ asm volatile (" \
+ r3 = r1; \
+ r2 = %[map_prog1_socket] ll; \
+ call %[bpf_tail_call]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_prog1_socket)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: cmp map pointer with zero")
+__success __success_unpriv
+__retval(0)
+__naked void cmp_map_pointer_with_zero(void)
+{
+ asm volatile (" \
+ r1 = %[map_hash_8b] ll; \
+ if r1 == 0 goto l0_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: cmp map pointer with const")
+__success __failure_unpriv __msg_unpriv("R1 pointer comparison prohibited")
+__retval(0)
+__naked void cmp_map_pointer_with_const(void)
+{
+ asm volatile (" \
+ r1 = %[map_hash_8b] ll; \
+ if r1 == 0x0000beef goto l0_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: write into frame pointer")
+__failure __msg("frame pointer is read only")
+__failure_unpriv
+__naked void unpriv_write_into_frame_pointer(void)
+{
+ asm volatile (" \
+ r10 = r1; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: spill/fill frame pointer")
+__failure __msg("frame pointer is read only")
+__failure_unpriv
+__naked void unpriv_spill_fill_frame_pointer(void)
+{
+ asm volatile (" \
+ r6 = r10; \
+ r6 += -8; \
+ *(u64*)(r6 + 0) = r10; \
+ r10 = *(u64*)(r6 + 0); \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: cmp of frame pointer")
+__success __failure_unpriv __msg_unpriv("R10 pointer comparison")
+__retval(0)
+__naked void unpriv_cmp_of_frame_pointer(void)
+{
+ asm volatile (" \
+ if r10 == 0 goto l0_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: adding of fp, reg")
+__success __failure_unpriv
+__msg_unpriv("R1 stack pointer arithmetic goes out of range")
+__retval(0)
+__naked void unpriv_adding_of_fp_reg(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ r1 = 0; \
+ r1 += r10; \
+ *(u64*)(r1 - 8) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: adding of fp, imm")
+__success __failure_unpriv
+__msg_unpriv("R1 stack pointer arithmetic goes out of range")
+__retval(0)
+__naked void unpriv_adding_of_fp_imm(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ r1 = r10; \
+ r1 += 0; \
+ *(u64*)(r1 - 8) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: cmp of stack pointer")
+__success __failure_unpriv __msg_unpriv("R2 pointer comparison")
+__retval(0)
+__naked void unpriv_cmp_of_stack_pointer(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ if r2 == 0 goto l0_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: Spectre v1 path-based type confusion of scalar as stack-ptr")
+__success __success_unpriv __retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if r0 != 0x1 goto pc+2")
+/* This nospec prevents the exploit because it forces the mispredicted (not
+ * taken) `if r0 != 0x0 goto l0_%=` to resolve before using r6 as a pointer.
+ * This causes the CPU to realize that `r6 = r9` should have never executed. It
+ * ensures that r6 always contains a readable stack slot ptr when the insn after
+ * the nospec executes.
+ */
+__xlated_unpriv("nospec")
+__xlated_unpriv("r9 = *(u8 *)(r6 +0)")
+#endif
+__naked void unpriv_spec_v1_type_confusion(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ /* r0: pointer to a map array entry */ \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ /* r1, r2: prepared call args */ \
+ r6 = r10; \
+ r6 += -8; \
+ /* r6: pointer to readable stack slot */ \
+ r9 = 0xffffc900; \
+ r9 <<= 32; \
+ /* r9: scalar controlled by attacker */ \
+ r0 = *(u64 *)(r0 + 0); /* cache miss */ \
+ if r0 != 0x0 goto l0_%=; \
+ r6 = r9; \
+l0_%=: if r0 != 0x1 goto l1_%=; \
+ r9 = *(u8 *)(r6 + 0); \
+l1_%=: /* leak r9 */ \
+ r9 &= 1; \
+ r9 <<= 9; \
+ *(u64*)(r10 - 8) = r9; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ /* leak secret into is_cached(map[0|512]): */ \
+ r0 = *(u64 *)(r0 + 0); \
+l2_%=: \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: ldimm64 before Spectre v4 barrier")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V4
+__xlated_unpriv("r1 = 0x2020200005642020") /* should not matter */
+__xlated_unpriv("*(u64 *)(r10 -8) = r1")
+__xlated_unpriv("nospec")
+#endif
+__naked void unpriv_ldimm64_spectre_v4(void)
+{
+ asm volatile (" \
+ r1 = 0x2020200005642020 ll; \
+ *(u64 *)(r10 -8) = r1; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: Spectre v1 and v4 barrier")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+#ifdef SPEC_V4
+/* starts with r0 == r8 == r9 == 0 */
+__xlated_unpriv("if r8 != 0x0 goto pc+1")
+__xlated_unpriv("goto pc+2")
+__xlated_unpriv("if r9 == 0x0 goto pc+4")
+__xlated_unpriv("r2 = r0")
+/* The following nospec is required to prevent the dangerous `*(u64 *)(NOT_FP -64)
+ * = r1` iff `if r9 == 0 goto pc+4` is mispredicted because of Spectre v1. The
+ * test therefore ensures the Spectre-v4--induced nospec does not prevent the
+ * Spectre-v1--induced speculative path from being fully analyzed.
+ */
+__xlated_unpriv("nospec") /* Spectre v1 */
+__xlated_unpriv("*(u64 *)(r2 -64) = r1") /* could be used to leak r2 */
+__xlated_unpriv("nospec") /* Spectre v4 */
+#endif
+#endif
+__naked void unpriv_spectre_v1_and_v4(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r8 = r0; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r9 = r0; \
+ r0 = r10; \
+ r1 = 0; \
+ r2 = r10; \
+ if r8 != 0 goto l0_%=; \
+ if r9 != 0 goto l0_%=; \
+ r0 = 0; \
+l0_%=: if r8 != 0 goto l1_%=; \
+ goto l2_%=; \
+l1_%=: if r9 == 0 goto l3_%=; \
+ r2 = r0; \
+l2_%=: *(u64 *)(r2 -64) = r1; \
+l3_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: Spectre v1 and v4 barrier (simple)")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+#ifdef SPEC_V4
+__xlated_unpriv("if r8 != 0x0 goto pc+1")
+__xlated_unpriv("goto pc+2")
+__xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */
+__xlated_unpriv("goto pc-1") /* r2 = r0 */
+__xlated_unpriv("nospec")
+__xlated_unpriv("*(u64 *)(r2 -64) = r1")
+__xlated_unpriv("nospec")
+#endif
+#endif
+__naked void unpriv_spectre_v1_and_v4_simple(void)
+{
+ asm volatile (" \
+ r8 = 0; \
+ r9 = 0; \
+ r0 = r10; \
+ r1 = 0; \
+ r2 = r10; \
+ if r8 != 0 goto l0_%=; \
+ if r9 != 0 goto l0_%=; \
+ r0 = 0; \
+l0_%=: if r8 != 0 goto l1_%=; \
+ goto l2_%=; \
+l1_%=: if r9 == 0 goto l3_%=; \
+ r2 = r0; \
+l2_%=: *(u64 *)(r2 -64) = r1; \
+l3_%=: r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: ldimm64 before Spectre v1 and v4 barrier (simple)")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+#ifdef SPEC_V4
+__xlated_unpriv("if r8 != 0x0 goto pc+1")
+__xlated_unpriv("goto pc+4")
+__xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */
+__xlated_unpriv("goto pc-1") /* r2 = r0 */
+__xlated_unpriv("goto pc-1") /* r1 = 0x2020200005642020 ll */
+__xlated_unpriv("goto pc-1") /* second part of ldimm64 */
+__xlated_unpriv("nospec")
+__xlated_unpriv("*(u64 *)(r2 -64) = r1")
+__xlated_unpriv("nospec")
+#endif
+#endif
+__naked void unpriv_ldimm64_spectre_v1_and_v4_simple(void)
+{
+ asm volatile (" \
+ r8 = 0; \
+ r9 = 0; \
+ r0 = r10; \
+ r1 = 0; \
+ r2 = r10; \
+ if r8 != 0 goto l0_%=; \
+ if r9 != 0 goto l0_%=; \
+ r0 = 0; \
+l0_%=: if r8 != 0 goto l1_%=; \
+ goto l2_%=; \
+l1_%=: if r9 == 0 goto l3_%=; \
+ r2 = r0; \
+ r1 = 0x2020200005642020 ll; \
+l2_%=: *(u64 *)(r2 -64) = r1; \
+l3_%=: r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c b/tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c
new file mode 100644
index 000000000000..4d77407a0a79
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/unpriv.c */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("perf_event")
+__description("unpriv: spill/fill of different pointers ldx")
+__failure __msg("same insn cannot be used with different pointers")
+__naked void fill_of_different_pointers_ldx(void)
+{
+ asm volatile (" \
+ r6 = r10; \
+ r6 += -8; \
+ if r1 == 0 goto l0_%=; \
+ r2 = r10; \
+ r2 += %[__imm_0]; \
+ *(u64*)(r6 + 0) = r2; \
+l0_%=: if r1 != 0 goto l1_%=; \
+ *(u64*)(r6 + 0) = r1; \
+l1_%=: r1 = *(u64*)(r6 + 0); \
+ r1 = *(u64*)(r1 + %[sample_period]); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__imm_0,
+ -(__s32) offsetof(struct bpf_perf_event_data, sample_period) - 8),
+ __imm_const(sample_period,
+ offsetof(struct bpf_perf_event_data, sample_period))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_value.c b/tools/testing/selftests/bpf/progs/verifier_value.c
new file mode 100644
index 000000000000..b5af6b6f5acd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_value.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/value.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct test_val);
+} map_hash_48b SEC(".maps");
+
+SEC("socket")
+__description("map element value store of cleared call register")
+__failure __msg("R1 !read_ok")
+__failure_unpriv __msg_unpriv("R1 !read_ok")
+__naked void store_of_cleared_call_register(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map element value with unaligned store")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr")
+__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void element_value_with_unaligned_store(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r0 += 3; \
+ r1 = 42; \
+ *(u64*)(r0 + 0) = r1; \
+ r1 = 43; \
+ *(u64*)(r0 + 2) = r1; \
+ r1 = 44; \
+ *(u64*)(r0 - 2) = r1; \
+ r8 = r0; \
+ r1 = 32; \
+ *(u64*)(r8 + 0) = r1; \
+ r1 = 33; \
+ *(u64*)(r8 + 2) = r1; \
+ r1 = 34; \
+ *(u64*)(r8 - 2) = r1; \
+ r8 += 5; \
+ r1 = 22; \
+ *(u64*)(r8 + 0) = r1; \
+ r1 = 23; \
+ *(u64*)(r8 + 4) = r1; \
+ r1 = 24; \
+ *(u64*)(r8 - 7) = r1; \
+ r7 = r8; \
+ r7 += 3; \
+ r1 = 22; \
+ *(u64*)(r7 + 0) = r1; \
+ r1 = 23; \
+ *(u64*)(r7 + 4) = r1; \
+ r1 = 24; \
+ *(u64*)(r7 - 4) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map element value with unaligned load")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr")
+__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void element_value_with_unaligned_load(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u32*)(r0 + 0); \
+ if r1 >= %[max_entries] goto l0_%=; \
+ r0 += 3; \
+ r7 = *(u64*)(r0 + 0); \
+ r7 = *(u64*)(r0 + 2); \
+ r8 = r0; \
+ r7 = *(u64*)(r8 + 0); \
+ r7 = *(u64*)(r8 + 2); \
+ r0 += 5; \
+ r7 = *(u64*)(r0 + 0); \
+ r7 = *(u64*)(r0 + 4); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(max_entries, MAX_ENTRIES)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map element value is preserved across register spilling")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr")
+__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void is_preserved_across_register_spilling(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r0 += %[test_val_foo]; \
+ r1 = 42; \
+ *(u64*)(r0 + 0) = r1; \
+ r1 = r10; \
+ r1 += -184; \
+ *(u64*)(r1 + 0) = r0; \
+ r3 = *(u64*)(r1 + 0); \
+ r1 = 42; \
+ *(u64*)(r3 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_value_adj_spill.c b/tools/testing/selftests/bpf/progs/verifier_value_adj_spill.c
new file mode 100644
index 000000000000..d7a5ba9bbe6a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_value_adj_spill.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/value_adj_spill.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct test_val);
+} map_hash_48b SEC(".maps");
+
+SEC("socket")
+__description("map element value is preserved across register spilling")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr")
+__retval(0)
+__naked void is_preserved_across_register_spilling(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 42; \
+ *(u64*)(r0 + 0) = r1; \
+ r1 = r10; \
+ r1 += -184; \
+ *(u64*)(r1 + 0) = r0; \
+ r3 = *(u64*)(r1 + 0); \
+ r1 = 42; \
+ *(u64*)(r3 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map element value or null is marked on register spilling")
+__success __failure_unpriv __msg_unpriv("R0 leaks addr")
+__retval(0)
+__naked void is_marked_on_register_spilling(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r1 = r10; \
+ r1 += -152; \
+ *(u64*)(r1 + 0) = r0; \
+ if r0 == 0 goto l0_%=; \
+ r3 = *(u64*)(r1 + 0); \
+ r1 = 42; \
+ *(u64*)(r3 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c b/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c
new file mode 100644
index 000000000000..a9ab37d3b9e2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/value_illegal_alu.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct test_val);
+} map_hash_48b SEC(".maps");
+
+SEC("socket")
+__description("map element value illegal alu op, 1")
+__failure __msg("R0 bitwise operator &= on pointer")
+__failure_unpriv
+__naked void value_illegal_alu_op_1(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r0 &= 8; \
+ r1 = 22; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map element value illegal alu op, 2")
+__failure __msg("R0 32-bit pointer arithmetic prohibited")
+__failure_unpriv
+__naked void value_illegal_alu_op_2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ w0 += 0; \
+ r1 = 22; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map element value illegal alu op, 3")
+__failure __msg("R0 pointer arithmetic with /= operator")
+__failure_unpriv
+__naked void value_illegal_alu_op_3(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r0 /= 42; \
+ r1 = 22; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map element value illegal alu op, 4")
+__failure __msg("invalid mem access 'scalar'")
+__failure_unpriv __msg_unpriv("R0 pointer arithmetic prohibited")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void value_illegal_alu_op_4(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r0 = be64 r0; \
+ r1 = 22; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map element value illegal alu op, 5")
+__failure __msg("R0 invalid mem access 'scalar'")
+__msg_unpriv("leaking pointer from stack off -8")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void value_illegal_alu_op_5(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = 0; \
+ *(u64*)(r2 + 0) = r1; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r3 = 4096; \
+ r2 = r10; \
+ r2 += -8; \
+ *(u64*)(r2 + 0) = r0; \
+ lock *(u64 *)(r2 + 0) += r3; \
+ r0 = *(u64*)(r2 + 0); \
+ r1 = 22; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
+SEC("flow_dissector")
+__description("flow_keys illegal alu op with variable offset")
+__failure __msg("R7 pointer arithmetic on flow_keys prohibited")
+__naked void flow_keys_illegal_variable_offset_alu(void)
+{
+ asm volatile(" \
+ r6 = r1; \
+ r7 = *(u64*)(r6 + %[flow_keys_off]); \
+ r8 = 8; \
+ r8 /= 1; \
+ r8 &= 8; \
+ r7 += r8; \
+ r0 = *(u64*)(r7 + 0); \
+ exit; \
+" :
+ : __imm_const(flow_keys_off, offsetof(struct __sk_buff, flow_keys))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_value_or_null.c b/tools/testing/selftests/bpf/progs/verifier_value_or_null.c
new file mode 100644
index 000000000000..8ff668a242eb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_value_or_null.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/value_or_null.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct test_val);
+} map_hash_48b SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
+SEC("tc")
+__description("multiple registers share map_lookup_elem result")
+__success __retval(0)
+__naked void share_map_lookup_elem_result(void)
+{
+ asm volatile (" \
+ r1 = 10; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r4 = r0; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 0; \
+ *(u64*)(r4 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("alu ops on ptr_to_map_value_or_null, 1")
+__failure __msg("R4 pointer arithmetic on map_value_or_null")
+__naked void map_value_or_null_1(void)
+{
+ asm volatile (" \
+ r1 = 10; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r4 = r0; \
+ r4 += -2; \
+ r4 += 2; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 0; \
+ *(u64*)(r4 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("alu ops on ptr_to_map_value_or_null, 2")
+__failure __msg("R4 pointer arithmetic on map_value_or_null")
+__naked void map_value_or_null_2(void)
+{
+ asm volatile (" \
+ r1 = 10; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r4 = r0; \
+ r4 &= -1; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 0; \
+ *(u64*)(r4 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("alu ops on ptr_to_map_value_or_null, 3")
+__failure __msg("R4 pointer arithmetic on map_value_or_null")
+__naked void map_value_or_null_3(void)
+{
+ asm volatile (" \
+ r1 = 10; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r4 = r0; \
+ r4 <<= 1; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 0; \
+ *(u64*)(r4 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("invalid memory access with multiple map_lookup_elem calls")
+__failure __msg("R4 !read_ok")
+__naked void multiple_map_lookup_elem_calls(void)
+{
+ asm volatile (" \
+ r1 = 10; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ r8 = r1; \
+ r7 = r2; \
+ call %[bpf_map_lookup_elem]; \
+ r4 = r0; \
+ r1 = r8; \
+ r2 = r7; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 0; \
+ *(u64*)(r4 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("valid indirect map_lookup_elem access with 2nd lookup in branch")
+__success __retval(0)
+__naked void with_2nd_lookup_in_branch(void)
+{
+ asm volatile (" \
+ r1 = 10; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ r8 = r1; \
+ r7 = r2; \
+ call %[bpf_map_lookup_elem]; \
+ r2 = 10; \
+ if r2 != 0 goto l0_%=; \
+ r1 = r8; \
+ r2 = r7; \
+ call %[bpf_map_lookup_elem]; \
+l0_%=: r4 = r0; \
+ if r0 == 0 goto l1_%=; \
+ r1 = 0; \
+ *(u64*)(r4 + 0) = r1; \
+l1_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid map access from else condition")
+__failure __msg("R0 unbounded memory access")
+__failure_unpriv __msg_unpriv("R0 leaks addr")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void map_access_from_else_condition(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u32*)(r0 + 0); \
+ if r1 >= %[__imm_0] goto l1_%=; \
+ r1 += 1; \
+l1_%=: r1 <<= 2; \
+ r0 += r1; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_48b),
+ __imm_const(__imm_0, MAX_ENTRIES-1),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("map lookup and null branch prediction")
+__success __retval(0)
+__naked void lookup_and_null_branch_prediction(void)
+{
+ asm volatile (" \
+ r1 = 10; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r6 = r0; \
+ if r6 == 0 goto l0_%=; \
+ if r6 != 0 goto l0_%=; \
+ r10 += 10; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("MAP_VALUE_OR_NULL check_ids() in regsafe()")
+__failure __msg("R8 invalid mem access 'map_value_or_null'")
+__failure_unpriv __msg_unpriv("")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void null_check_ids_in_regsafe(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ /* r9 = map_lookup_elem(...) */ \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r9 = r0; \
+ /* r8 = map_lookup_elem(...) */ \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r8 = r0; \
+ /* r7 = ktime_get_ns() */ \
+ call %[bpf_ktime_get_ns]; \
+ r7 = r0; \
+ /* r6 = ktime_get_ns() */ \
+ call %[bpf_ktime_get_ns]; \
+ r6 = r0; \
+ /* if r6 > r7 goto +1 ; no new information about the state is derived from\
+ * ; this check, thus the produced verifier states differ\
+ * ; only in 'insn_idx' \
+ * r9 = r8 ; optionally share ID between r9 and r8\
+ */ \
+ if r6 > r7 goto l0_%=; \
+ r9 = r8; \
+l0_%=: /* if r9 == 0 goto <exit> */ \
+ if r9 == 0 goto l1_%=; \
+ /* read map value via r8, this is not always \
+ * safe because r8 might be not equal to r9. \
+ */ \
+ r0 = *(u64*)(r8 + 0); \
+l1_%=: /* exit 0 */ \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
new file mode 100644
index 000000000000..af7938ce56cb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
@@ -0,0 +1,1441 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/value_ptr_arith.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <errno.h>
+#include "bpf_misc.h"
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct test_val);
+} map_array_48b SEC(".maps");
+
+struct other_val {
+ long long foo;
+ long long bar;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct other_val);
+} map_hash_16b SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct test_val);
+} map_hash_48b SEC(".maps");
+
+SEC("socket")
+__description("map access: known scalar += value_ptr unknown vs const")
+__success __failure_unpriv
+__msg_unpriv("R1 tried to add from different maps, paths or scalars")
+__retval(1)
+__naked void value_ptr_unknown_vs_const(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[__sk_buff_len]); \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ if r0 == 1 goto l0_%=; \
+ r1 = %[map_hash_16b] ll; \
+ if r0 != 1 goto l1_%=; \
+l0_%=: r1 = %[map_array_48b] ll; \
+l1_%=: call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ r4 = *(u8*)(r0 + 0); \
+ if r4 == 1 goto l3_%=; \
+ r1 = 6; \
+ r1 = -r1; \
+ r1 &= 0x7; \
+ goto l4_%=; \
+l3_%=: r1 = 3; \
+l4_%=: r1 += r0; \
+ r0 = *(u8*)(r1 + 0); \
+l2_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b),
+ __imm_addr(map_hash_16b),
+ __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: known scalar += value_ptr const vs unknown")
+__success __failure_unpriv
+__msg_unpriv("R1 tried to add from different maps, paths or scalars")
+__retval(1)
+__naked void value_ptr_const_vs_unknown(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[__sk_buff_len]); \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ if r0 == 1 goto l0_%=; \
+ r1 = %[map_hash_16b] ll; \
+ if r0 != 1 goto l1_%=; \
+l0_%=: r1 = %[map_array_48b] ll; \
+l1_%=: call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ r4 = *(u8*)(r0 + 0); \
+ if r4 == 1 goto l3_%=; \
+ r1 = 3; \
+ goto l4_%=; \
+l3_%=: r1 = 6; \
+ r1 = -r1; \
+ r1 &= 0x7; \
+l4_%=: r1 += r0; \
+ r0 = *(u8*)(r1 + 0); \
+l2_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b),
+ __imm_addr(map_hash_16b),
+ __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: known scalar += value_ptr const vs const (ne)")
+__success __failure_unpriv
+__msg_unpriv("R1 tried to add from different maps, paths or scalars")
+__retval(1)
+__naked void ptr_const_vs_const_ne(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[__sk_buff_len]); \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ if r0 == 1 goto l0_%=; \
+ r1 = %[map_hash_16b] ll; \
+ if r0 != 1 goto l1_%=; \
+l0_%=: r1 = %[map_array_48b] ll; \
+l1_%=: call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ r4 = *(u8*)(r0 + 0); \
+ if r4 == 1 goto l3_%=; \
+ r1 = 3; \
+ goto l4_%=; \
+l3_%=: r1 = 5; \
+l4_%=: r1 += r0; \
+ r0 = *(u8*)(r1 + 0); \
+l2_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b),
+ __imm_addr(map_hash_16b),
+ __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: known scalar += value_ptr const vs const (eq)")
+__success __success_unpriv __retval(1)
+__naked void ptr_const_vs_const_eq(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[__sk_buff_len]); \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ if r0 == 1 goto l0_%=; \
+ r1 = %[map_hash_16b] ll; \
+ if r0 != 1 goto l1_%=; \
+l0_%=: r1 = %[map_array_48b] ll; \
+l1_%=: call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ r4 = *(u8*)(r0 + 0); \
+ if r4 == 1 goto l3_%=; \
+ r1 = 5; \
+ goto l4_%=; \
+l3_%=: r1 = 5; \
+l4_%=: r1 += r0; \
+ r0 = *(u8*)(r1 + 0); \
+l2_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b),
+ __imm_addr(map_hash_16b),
+ __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: known scalar += value_ptr unknown vs unknown (eq)")
+__success __success_unpriv __retval(1)
+__naked void ptr_unknown_vs_unknown_eq(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[__sk_buff_len]); \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ if r0 == 1 goto l0_%=; \
+ r1 = %[map_hash_16b] ll; \
+ if r0 != 1 goto l1_%=; \
+l0_%=: r1 = %[map_array_48b] ll; \
+l1_%=: call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ r4 = *(u8*)(r0 + 0); \
+ if r4 == 1 goto l3_%=; \
+ r1 = 6; \
+ r1 = -r1; \
+ r1 &= 0x7; \
+ goto l4_%=; \
+l3_%=: r1 = 6; \
+ r1 = -r1; \
+ r1 &= 0x7; \
+l4_%=: r1 += r0; \
+ r0 = *(u8*)(r1 + 0); \
+l2_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b),
+ __imm_addr(map_hash_16b),
+ __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: known scalar += value_ptr unknown vs unknown (lt)")
+__success __failure_unpriv
+__msg_unpriv("R1 tried to add from different maps, paths or scalars")
+__retval(1)
+__naked void ptr_unknown_vs_unknown_lt(void)
+{
+ asm volatile (" \
+ r8 = r1; \
+ call %[bpf_get_prandom_u32]; \
+ r9 = r0; \
+ r1 = r8; \
+ r0 = *(u32*)(r1 + %[__sk_buff_len]); \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ if r0 == 1 goto l0_%=; \
+ r1 = %[map_hash_16b] ll; \
+ if r0 != 1 goto l1_%=; \
+l0_%=: r1 = %[map_array_48b] ll; \
+l1_%=: call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ r4 = *(u8*)(r0 + 0); \
+ if r4 == 1 goto l3_%=; \
+ r1 = 6; \
+ r1 = r9; \
+ r1 &= 0x3; \
+ goto l4_%=; \
+l3_%=: r1 = 6; \
+ r1 = r9; \
+ r1 &= 0x7; \
+l4_%=: r1 += r0; \
+ r0 = *(u8*)(r1 + 0); \
+l2_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b),
+ __imm_addr(map_hash_16b),
+ __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: known scalar += value_ptr unknown vs unknown (gt)")
+__success __failure_unpriv
+__msg_unpriv("R1 tried to add from different maps, paths or scalars")
+__retval(1)
+__naked void ptr_unknown_vs_unknown_gt(void)
+{
+ asm volatile (" \
+ r8 = r1; \
+ call %[bpf_get_prandom_u32]; \
+ r9 = r0; \
+ r1 = r8; \
+ r0 = *(u32*)(r1 + %[__sk_buff_len]); \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ if r0 == 1 goto l0_%=; \
+ r1 = %[map_hash_16b] ll; \
+ if r0 != 1 goto l1_%=; \
+l0_%=: r1 = %[map_array_48b] ll; \
+l1_%=: call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ r4 = *(u8*)(r0 + 0); \
+ if r4 == 1 goto l3_%=; \
+ r1 = 6; \
+ r1 = r9; \
+ r1 &= 0x7; \
+ goto l4_%=; \
+l3_%=: r1 = 6; \
+ r1 = r9; \
+ r1 &= 0x3; \
+l4_%=: r1 += r0; \
+ r0 = *(u8*)(r1 + 0); \
+l2_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b),
+ __imm_addr(map_hash_16b),
+ __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: known scalar += value_ptr from different maps")
+__success __success_unpriv __retval(1)
+__naked void value_ptr_from_different_maps(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[__sk_buff_len]); \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ if r0 == 1 goto l0_%=; \
+ r1 = %[map_hash_16b] ll; \
+ if r0 != 1 goto l1_%=; \
+l0_%=: r1 = %[map_array_48b] ll; \
+l1_%=: call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ r1 = 4; \
+ r1 += r0; \
+ r0 = *(u8*)(r1 + 0); \
+l2_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b),
+ __imm_addr(map_hash_16b),
+ __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr -= known scalar from different maps")
+__success __failure_unpriv
+__msg_unpriv("R0 min value is outside of the allowed memory range")
+__retval(1)
+__naked void known_scalar_from_different_maps(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[__sk_buff_len]); \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ if r0 == 1 goto l0_%=; \
+ r1 = %[map_hash_16b] ll; \
+ if r0 != 1 goto l1_%=; \
+l0_%=: r1 = %[map_array_48b] ll; \
+l1_%=: call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ r1 = 4; \
+ r0 -= r1; \
+ r0 += r1; \
+ r0 = *(u8*)(r0 + 0); \
+l2_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b),
+ __imm_addr(map_hash_16b),
+ __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: known scalar += value_ptr from different maps, but same value properties")
+__success __success_unpriv __retval(1)
+__naked void maps_but_same_value_properties(void)
+{
+ asm volatile (" \
+ r0 = *(u32*)(r1 + %[__sk_buff_len]); \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ if r0 == 1 goto l0_%=; \
+ r1 = %[map_hash_48b] ll; \
+ if r0 != 1 goto l1_%=; \
+l0_%=: r1 = %[map_array_48b] ll; \
+l1_%=: call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ r1 = 4; \
+ r1 += r0; \
+ r0 = *(u8*)(r1 + 0); \
+l2_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b),
+ __imm_addr(map_hash_48b),
+ __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: mixing value pointer and scalar, 1")
+__success __failure_unpriv
+__msg_unpriv("R2 tried to add from different maps, paths or scalars, pointer arithmetic with it prohibited for !root")
+__retval(0)
+__naked void value_pointer_and_scalar_1(void)
+{
+ asm volatile (" \
+ /* load map value pointer into r0 and r2 */ \
+ r0 = 1; \
+ r1 = %[map_array_48b] ll; \
+ r2 = r10; \
+ r2 += -16; \
+ r6 = 0; \
+ *(u64*)(r10 - 16) = r6; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: /* load some number from the map into r1 */ \
+ r1 = *(u8*)(r0 + 0); \
+ /* depending on r1, branch: */ \
+ if r1 != 0 goto l1_%=; \
+ /* branch A */ \
+ r2 = r0; \
+ r3 = 0; \
+ goto l2_%=; \
+l1_%=: /* branch B */ \
+ r2 = 0; \
+ r3 = 0x100000; \
+l2_%=: /* common instruction */ \
+ r2 += r3; \
+ /* depending on r1, branch: */ \
+ if r1 != 0 goto l3_%=; \
+ /* branch A */ \
+ goto l4_%=; \
+l3_%=: /* branch B */ \
+ r0 = 0x13371337; \
+ /* verifier follows fall-through */ \
+ /* unpriv: nospec (inserted to prevent `R2 pointer comparison prohibited`) */\
+ if r2 != 0x100000 goto l4_%=; \
+ r0 = 0; \
+ exit; \
+l4_%=: /* fake-dead code; targeted from branch A to \
+ * prevent dead code sanitization \
+ */ \
+ r0 = *(u8*)(r0 + 0); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: mixing value pointer and scalar, 2")
+__success __failure_unpriv
+__msg_unpriv("R2 tried to add from different maps, paths or scalars, pointer arithmetic with it prohibited for !root")
+__retval(0)
+__naked void value_pointer_and_scalar_2(void)
+{
+ asm volatile (" \
+ /* load map value pointer into r0 and r2 */ \
+ r0 = 1; \
+ r1 = %[map_array_48b] ll; \
+ r2 = r10; \
+ r2 += -16; \
+ r6 = 0; \
+ *(u64*)(r10 - 16) = r6; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: /* load some number from the map into r1 */ \
+ r1 = *(u8*)(r0 + 0); \
+ /* depending on r1, branch: */ \
+ if r1 == 0 goto l1_%=; \
+ /* branch A */ \
+ r2 = 0; \
+ r3 = 0x100000; \
+ goto l2_%=; \
+l1_%=: /* branch B */ \
+ r2 = r0; \
+ r3 = 0; \
+l2_%=: /* common instruction */ \
+ r2 += r3; \
+ /* depending on r1, branch: */ \
+ if r1 != 0 goto l3_%=; \
+ /* branch A */ \
+ goto l4_%=; \
+l3_%=: /* branch B */ \
+ r0 = 0x13371337; \
+ /* verifier follows fall-through */ \
+ if r2 != 0x100000 goto l4_%=; \
+ r0 = 0; \
+ exit; \
+l4_%=: /* fake-dead code; targeted from branch A to \
+ * prevent dead code sanitization, but \
+ * rejected via branch B \
+ */ \
+ /* unpriv: nospec (inserted to prevent `R0 invalid mem access 'scalar'`) */\
+ r0 = *(u8*)(r0 + 0); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("sanitation: alu with different scalars 1")
+__success __success_unpriv __retval(0x100000)
+__naked void alu_with_different_scalars_1(void)
+{
+ asm volatile (" \
+ r0 = 1; \
+ r1 = %[map_array_48b] ll; \
+ r2 = r10; \
+ r2 += -16; \
+ r6 = 0; \
+ *(u64*)(r10 - 16) = r6; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r1 = *(u32*)(r0 + 0); \
+ if r1 == 0 goto l1_%=; \
+ r2 = 0; \
+ r3 = 0x100000; \
+ goto l2_%=; \
+l1_%=: r2 = 42; \
+ r3 = 0x100001; \
+l2_%=: r2 += r3; \
+ r0 = r2; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("sanitation: alu with different scalars 2")
+__success __success_unpriv __retval(0)
+__naked void alu_with_different_scalars_2(void)
+{
+ asm volatile (" \
+ r0 = 1; \
+ r1 = %[map_array_48b] ll; \
+ r6 = r1; \
+ r2 = r10; \
+ r2 += -16; \
+ r7 = 0; \
+ *(u64*)(r10 - 16) = r7; \
+ call %[bpf_map_delete_elem]; \
+ r7 = r0; \
+ r1 = r6; \
+ r2 = r10; \
+ r2 += -16; \
+ call %[bpf_map_delete_elem]; \
+ r6 = r0; \
+ r8 = r6; \
+ r8 += r7; \
+ r0 = r8; \
+ r0 += %[einval]; \
+ r0 += %[einval]; \
+ exit; \
+" :
+ : __imm(bpf_map_delete_elem),
+ __imm_addr(map_array_48b),
+ __imm_const(einval, EINVAL)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("sanitation: alu with different scalars 3")
+__success __success_unpriv __retval(0)
+__naked void alu_with_different_scalars_3(void)
+{
+ asm volatile (" \
+ r0 = %[einval]; \
+ r0 *= -1; \
+ r7 = r0; \
+ r0 = %[einval]; \
+ r0 *= -1; \
+ r6 = r0; \
+ r8 = r6; \
+ r8 += r7; \
+ r0 = r8; \
+ r0 += %[einval]; \
+ r0 += %[einval]; \
+ exit; \
+" :
+ : __imm_const(einval, EINVAL)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr += known scalar, upper oob arith, test 1")
+__success __failure_unpriv
+__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
+__retval(1)
+__naked void upper_oob_arith_test_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 48; \
+ r0 += r1; \
+ r0 -= r1; \
+ r0 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr += known scalar, upper oob arith, test 2")
+__success __failure_unpriv
+__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
+__retval(1)
+__naked void upper_oob_arith_test_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 49; \
+ r0 += r1; \
+ r0 -= r1; \
+ r0 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr += known scalar, upper oob arith, test 3")
+__success __success_unpriv __retval(1)
+__naked void upper_oob_arith_test_3(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 47; \
+ r0 += r1; \
+ r0 -= r1; \
+ r0 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr -= known scalar, lower oob arith, test 1")
+__failure __msg("R0 min value is outside of the allowed memory range")
+__failure_unpriv
+__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
+__naked void lower_oob_arith_test_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 47; \
+ r0 += r1; \
+ r1 = 48; \
+ r0 -= r1; \
+ r0 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr -= known scalar, lower oob arith, test 2")
+__success __failure_unpriv
+__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
+__retval(1)
+__naked void lower_oob_arith_test_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 47; \
+ r0 += r1; \
+ r1 = 48; \
+ r0 -= r1; \
+ r1 = 1; \
+ r0 += r1; \
+ r0 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr -= known scalar, lower oob arith, test 3")
+__success __success_unpriv __retval(1)
+__naked void lower_oob_arith_test_3(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 47; \
+ r0 += r1; \
+ r1 = 47; \
+ r0 -= r1; \
+ r0 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: known scalar += value_ptr")
+__success __success_unpriv __retval(1)
+__naked void access_known_scalar_value_ptr_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 4; \
+ r1 += r0; \
+ r0 = *(u8*)(r1 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr += known scalar, 1")
+__success __success_unpriv __retval(1)
+__naked void value_ptr_known_scalar_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 4; \
+ r0 += r1; \
+ r1 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr += known scalar, 2")
+__failure __msg("invalid access to map value")
+__failure_unpriv
+__naked void value_ptr_known_scalar_2_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 49; \
+ r0 += r1; \
+ r1 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr += known scalar, 3")
+__failure __msg("invalid access to map value")
+__failure_unpriv
+__naked void value_ptr_known_scalar_3(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = -1; \
+ r0 += r1; \
+ r1 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr += known scalar, 4")
+__success __success_unpriv __retval(1)
+__naked void value_ptr_known_scalar_4(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 5; \
+ r0 += r1; \
+ r1 = -2; \
+ r0 += r1; \
+ r1 = -1; \
+ r0 += r1; \
+ r1 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr += known scalar, 5")
+__success __success_unpriv __retval(0xabcdef12)
+__naked void value_ptr_known_scalar_5(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = %[__imm_0]; \
+ r1 += r0; \
+ r0 = *(u32*)(r1 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b),
+ __imm_const(__imm_0, (6 + 1) * sizeof(int))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr += known scalar, 6")
+__success __success_unpriv __retval(0xabcdef12)
+__naked void value_ptr_known_scalar_6(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = %[__imm_0]; \
+ r0 += r1; \
+ r1 = %[__imm_1]; \
+ r0 += r1; \
+ r0 = *(u32*)(r0 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b),
+ __imm_const(__imm_0, (3 + 1) * sizeof(int)),
+ __imm_const(__imm_1, 3 * sizeof(int))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr += N, value_ptr -= N known scalar")
+__success __success_unpriv __retval(0x12345678)
+__naked void value_ptr_n_known_scalar(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ w1 = 0x12345678; \
+ *(u32*)(r0 + 0) = r1; \
+ r0 += 2; \
+ r1 = 2; \
+ r0 -= r1; \
+ r0 = *(u32*)(r0 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: unknown scalar += value_ptr, 1")
+__success __success_unpriv __retval(1)
+__naked void unknown_scalar_value_ptr_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u8*)(r0 + 0); \
+ r1 &= 0xf; \
+ r1 += r0; \
+ r0 = *(u8*)(r1 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: unknown scalar += value_ptr, 2")
+__success __success_unpriv __retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void unknown_scalar_value_ptr_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u32*)(r0 + 0); \
+ r1 &= 31; \
+ r1 += r0; \
+ r0 = *(u32*)(r1 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: unknown scalar += value_ptr, 3")
+__success __failure_unpriv
+__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
+__retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void unknown_scalar_value_ptr_3(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = -1; \
+ r0 += r1; \
+ r1 = 1; \
+ r0 += r1; \
+ r1 = *(u32*)(r0 + 0); \
+ r1 &= 31; \
+ r1 += r0; \
+ r0 = *(u32*)(r1 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: unknown scalar += value_ptr, 4")
+__failure __msg("R1 max value is outside of the allowed memory range")
+__msg_unpriv("R1 pointer arithmetic of map value goes out of range")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void unknown_scalar_value_ptr_4(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 19; \
+ r0 += r1; \
+ r1 = *(u32*)(r0 + 0); \
+ r1 &= 31; \
+ r1 += r0; \
+ r0 = *(u32*)(r1 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr += unknown scalar, 1")
+__success __success_unpriv __retval(1)
+__naked void value_ptr_unknown_scalar_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u8*)(r0 + 0); \
+ r1 &= 0xf; \
+ r0 += r1; \
+ r1 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr += unknown scalar, 2")
+__success __success_unpriv __retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void value_ptr_unknown_scalar_2_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u32*)(r0 + 0); \
+ r1 &= 31; \
+ r0 += r1; \
+ r0 = *(u32*)(r0 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr += unknown scalar, 3")
+__success __success_unpriv __retval(1)
+__naked void value_ptr_unknown_scalar_3(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u64*)(r0 + 0); \
+ r2 = *(u64*)(r0 + 8); \
+ r3 = *(u64*)(r0 + 16); \
+ r1 &= 0xf; \
+ r3 &= 1; \
+ r3 |= 1; \
+ if r2 > r3 goto l0_%=; \
+ r0 += r3; \
+ r0 = *(u8*)(r0 + 0); \
+ r0 = 1; \
+l1_%=: exit; \
+l0_%=: r0 = 2; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr += value_ptr")
+__failure __msg("R0 pointer += pointer prohibited")
+__failure_unpriv
+__naked void access_value_ptr_value_ptr_1(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r0 += r0; \
+ r1 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: known scalar -= value_ptr")
+__failure __msg("R1 tried to subtract pointer from scalar")
+__failure_unpriv
+__naked void access_known_scalar_value_ptr_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 4; \
+ r1 -= r0; \
+ r0 = *(u8*)(r1 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr -= known scalar")
+__failure __msg("R0 min value is outside of the allowed memory range")
+__failure_unpriv
+__naked void access_value_ptr_known_scalar(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 4; \
+ r0 -= r1; \
+ r1 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr -= known scalar, 2")
+__success __success_unpriv __retval(1)
+__naked void value_ptr_known_scalar_2_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = 6; \
+ r2 = 4; \
+ r0 += r1; \
+ r0 -= r2; \
+ r1 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: unknown scalar -= value_ptr")
+__failure __msg("R1 tried to subtract pointer from scalar")
+__failure_unpriv
+__naked void access_unknown_scalar_value_ptr(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u8*)(r0 + 0); \
+ r1 &= 0xf; \
+ r1 -= r0; \
+ r0 = *(u8*)(r1 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr -= unknown scalar")
+__failure __msg("R0 min value is negative")
+__failure_unpriv
+__naked void access_value_ptr_unknown_scalar(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u8*)(r0 + 0); \
+ r1 &= 0xf; \
+ r0 -= r1; \
+ r1 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr -= unknown scalar, 2")
+__success __success_unpriv
+__retval(1)
+#ifdef SPEC_V1
+__xlated_unpriv("r1 &= 7")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 pointer arithmetic of map value goes out of range` */
+__xlated_unpriv("r0 -= r1")
+#endif
+__naked void value_ptr_unknown_scalar_2_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r1 = *(u8*)(r0 + 0); \
+ r1 &= 0xf; \
+ r1 |= 0x7; \
+ r0 += r1; \
+ r1 = *(u8*)(r0 + 0); \
+ r1 &= 0x7; \
+ r0 -= r1; \
+ r1 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: value_ptr -= value_ptr")
+__failure __msg("R0 invalid mem access 'scalar'")
+__msg_unpriv("R0 pointer -= pointer prohibited")
+__naked void access_value_ptr_value_ptr_2(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ r0 -= r0; \
+ r1 = *(u8*)(r0 + 0); \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map access: trying to leak tainted dst reg")
+__failure __msg("math between map_value pointer and 4294967295 is not allowed")
+__failure_unpriv
+__naked void to_leak_tainted_dst_reg(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_48b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r2 = r0; \
+ w1 = 0xFFFFFFFF; \
+ w1 = w1; \
+ r2 -= r1; \
+ *(u64*)(r0 + 0) = r2; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_48b)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("32bit pkt_ptr -= scalar")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void _32bit_pkt_ptr_scalar(void)
+{
+ asm volatile (" \
+ r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r6 = r7; \
+ r6 += 40; \
+ if r6 > r8 goto l0_%=; \
+ w4 = w7; \
+ w6 -= w4; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("32bit scalar -= pkt_ptr")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void _32bit_scalar_pkt_ptr(void)
+{
+ asm volatile (" \
+ r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r7 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r6 = r7; \
+ r6 += 40; \
+ if r6 > r8 goto l0_%=; \
+ w4 = w6; \
+ w4 -= w7; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
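
As an aside (not part of the patch itself), a minimal C-level sketch of the bounded map-value pointer arithmetic these asm tests pin down at the instruction level; the map and function names (sketch_val, sketch_array, value_ptr_sketch) are made up for illustration.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct sketch_val {
	__u8 data[48];
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct sketch_val);
} sketch_array SEC(".maps");

SEC("socket")
int value_ptr_sketch(struct __sk_buff *skb)
{
	__u32 key = 0;
	struct sketch_val *val;

	val = bpf_map_lookup_elem(&sketch_array, &key);
	if (!val)			/* lookup may fail; the NULL check is mandatory */
		return 1;
	/* Masking bounds the index to [0, 15], provably inside the 48-byte
	 * value, so the resulting map-value pointer passes verification.
	 */
	return val->data[val->data[0] & 0xf];
}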
diff --git a/tools/testing/selftests/bpf/progs/verifier_var_off.c b/tools/testing/selftests/bpf/progs/verifier_var_off.c
new file mode 100644
index 000000000000..1d36d01b746e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_var_off.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/var_off.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
+SEC("lwt_in")
+__description("variable-offset ctx access")
+__failure __msg("variable ctx access var_off=(0x0; 0x4)")
+__naked void variable_offset_ctx_access(void)
+{
+ asm volatile (" \
+ /* Get an unknown value */ \
+ r2 = *(u32*)(r1 + 0); \
+ /* Make it small and 4-byte aligned */ \
+ r2 &= 4; \
+ /* add it to skb. We now have either &skb->len or\
+ * &skb->pkt_type, but we don't know which \
+ */ \
+ r1 += r2; \
+ /* dereference it */ \
+ r0 = *(u32*)(r1 + 0); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("variable-offset stack read, priv vs unpriv")
+__success __failure_unpriv
+__msg_unpriv("R2 variable stack access prohibited for !root")
+__retval(0)
+__naked void stack_read_priv_vs_unpriv(void)
+{
+ asm volatile (" \
+ /* Fill the top 8 bytes of the stack */ \
+ r0 = 0; \
+ *(u64*)(r10 - 8) = r0; \
+ /* Get an unknown value */ \
+ r2 = *(u32*)(r1 + 0); \
+ /* Make it small and 4-byte aligned */ \
+ r2 &= 4; \
+ r2 -= 8; \
+ /* add it to fp. We now have either fp-4 or fp-8, but\
+ * we don't know which \
+ */ \
+ r2 += r10; \
+ /* dereference it for a stack read */ \
+ r0 = *(u32*)(r2 + 0); \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("variable-offset stack read, uninitialized")
+__success
+__failure_unpriv __msg_unpriv("R2 variable stack access prohibited for !root")
+__naked void variable_offset_stack_read_uninitialized(void)
+{
+ asm volatile (" \
+ /* Get an unknown value */ \
+ r2 = *(u32*)(r1 + 0); \
+ /* Make it small and 4-byte aligned */ \
+ r2 &= 4; \
+ r2 -= 8; \
+ /* add it to fp. We now have either fp-4 or fp-8, but\
+ * we don't know which \
+ */ \
+ r2 += r10; \
+ /* dereference it for a stack read */ \
+ r0 = *(u32*)(r2 + 0); \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("variable-offset stack write, priv vs unpriv")
+__success
+/* Check that the maximum stack depth is correctly maintained according to the
+ * maximum possible variable offset.
+ */
+__log_level(4) __msg("stack depth 16")
+__failure_unpriv
+/* Variable stack access is rejected for unprivileged.
+ */
+__msg_unpriv("R2 variable stack access prohibited for !root")
+__retval(0)
+__naked void stack_write_priv_vs_unpriv(void)
+{
+ asm volatile (" \
+ /* Get an unknown value */ \
+ r2 = *(u32*)(r1 + 0); \
+ /* Make it small and 8-byte aligned */ \
+ r2 &= 8; \
+ r2 -= 16; \
+ /* Add it to fp. We now have either fp-8 or \
+ * fp-16, but we don't know which \
+ */ \
+ r2 += r10; \
+ /* Dereference it for a stack write */ \
+ r0 = 0; \
+ *(u64*)(r2 + 0) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+/* Similar to the previous test, but this time also perform a read from the
+ * address written to with a variable offset. The read is allowed, showing that,
+ * after a variable-offset write, a privileged program can read the slots that
+ * were in the range of that write (even if the verifier doesn't actually know
+ * whether the slot being read was really written to or not).
+ *
+ * Although this test is mostly a superset of the previous one, the previous test
+ * is kept because it also checks the stack depth in the case where there is no read.
+ */
+SEC("socket")
+__description("variable-offset stack write followed by read")
+__success
+/* Check that the maximum stack depth is correctly maintained according to the
+ * maximum possible variable offset.
+ */
+__log_level(4) __msg("stack depth 16")
+__failure_unpriv
+__msg_unpriv("R2 variable stack access prohibited for !root")
+__retval(0)
+__naked void stack_write_followed_by_read(void)
+{
+ asm volatile (" \
+ /* Get an unknown value */ \
+ r2 = *(u32*)(r1 + 0); \
+ /* Make it small and 8-byte aligned */ \
+ r2 &= 8; \
+ r2 -= 16; \
+ /* Add it to fp. We now have either fp-8 or fp-16, but\
+ * we don't know which \
+ */ \
+ r2 += r10; \
+ /* Dereference it for a stack write */ \
+ r0 = 0; \
+ *(u64*)(r2 + 0) = r0; \
+ /* Now read from the address we just wrote. */ \
+ r3 = *(u64*)(r2 + 0); \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("variable-offset stack write clobbers spilled regs")
+__failure
+/* In the privileged case, dereferencing a spilled-and-then-filled
+ * register is rejected because the previous variable-offset stack
+ * write might have overwritten the spilled pointer (i.e. we lose track
+ * of the spilled register when we analyze the write).
+ */
+__msg("R2 invalid mem access 'scalar'")
+__failure_unpriv
+/* The unprivileged case is not too interesting; variable
+ * stack access is rejected.
+ */
+__msg_unpriv("R2 variable stack access prohibited for !root")
+__naked void stack_write_clobbers_spilled_regs(void)
+{
+ asm volatile (" \
+ /* Dummy instruction; needed because we need to patch the next one\
+ * and we can't patch the first instruction. \
+ */ \
+ r6 = 0; \
+ /* Make R0 a map ptr */ \
+ r0 = %[map_hash_8b] ll; \
+ /* Get an unknown value */ \
+ r2 = *(u32*)(r1 + 0); \
+ /* Make it small and 8-byte aligned */ \
+ r2 &= 8; \
+ r2 -= 16; \
+ /* Add it to fp. We now have either fp-8 or fp-16, but\
+ * we don't know which. \
+ */ \
+ r2 += r10; \
+ /* Spill R0(map ptr) into stack */ \
+ *(u64*)(r10 - 8) = r0; \
+ /* Dereference the unknown value for a stack write */\
+ r0 = 0; \
+ *(u64*)(r2 + 0) = r0; \
+ /* Fill the register back into R2 */ \
+ r2 = *(u64*)(r10 - 8); \
+ /* Try to dereference R2 for a memory load */ \
+ r0 = *(u64*)(r2 + 8); \
+ exit; \
+" :
+ : __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("sockops")
+__description("indirect variable-offset stack access, unbounded")
+__failure __msg("invalid unbounded variable-offset write to stack R4")
+__naked void variable_offset_stack_access_unbounded(void)
+{
+ asm volatile (" \
+ r2 = 6; \
+ r3 = 28; \
+ /* Fill the top 16 bytes of the stack. */ \
+ r4 = 0; \
+ *(u64*)(r10 - 16) = r4; \
+ r4 = 0; \
+ *(u64*)(r10 - 8) = r4; \
+ /* Get an unknown value. */ \
+ r4 = *(u64*)(r1 + %[bpf_sock_ops_bytes_received]);\
+ /* Check the lower bound but don't check the upper one. */\
+ if r4 s< 0 goto l0_%=; \
+ /* Point the lower bound to initialized stack. Offset is now in range\
+ * from fp-16 to fp+0x7fffffffffffffef, i.e. max value is unbounded.\
+ */ \
+ r4 -= 16; \
+ r4 += r10; \
+ r5 = 8; \
+ /* Dereference it indirectly. */ \
+ call %[bpf_getsockopt]; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_getsockopt),
+ __imm_const(bpf_sock_ops_bytes_received, offsetof(struct bpf_sock_ops, bytes_received))
+ : __clobber_all);
+}
+
+SEC("lwt_in")
+__description("indirect variable-offset stack access, max out of bound")
+__failure __msg("invalid variable-offset read from stack R2")
+__naked void access_max_out_of_bound(void)
+{
+ asm volatile (" \
+ /* Fill the top 8 bytes of the stack */ \
+ r2 = 0; \
+ *(u64*)(r10 - 8) = r2; \
+ /* Get an unknown value */ \
+ r2 = *(u32*)(r1 + 0); \
+ /* Make it small and 4-byte aligned */ \
+ r2 &= 4; \
+ r2 -= 8; \
+ /* add it to fp. We now have either fp-4 or fp-8, but\
+ * we don't know which \
+ */ \
+ r2 += r10; \
+ /* dereference it indirectly */ \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+/* Similar to the test above, but this time check the special case of a
+ * zero-sized stack access. We used to have a bug causing crashes for zero-sized
+ * out-of-bounds accesses.
+ */
+SEC("socket")
+__description("indirect variable-offset stack access, zero-sized, max out of bound")
+__failure __msg("invalid variable-offset write to stack R1")
+__naked void zero_sized_access_max_out_of_bound(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ /* Fill some stack */ \
+ *(u64*)(r10 - 16) = r0; \
+ *(u64*)(r10 - 8) = r0; \
+ /* Get an unknown value */ \
+ r1 = *(u32*)(r1 + 0); \
+ r1 &= 63; \
+ r1 += -16; \
+ /* r1 is now anywhere in [-16,48) */ \
+ r1 += r10; \
+ r2 = 0; \
+ r3 = 0; \
+ call %[bpf_probe_read_kernel]; \
+ exit; \
+" :
+ : __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("lwt_in")
+__description("indirect variable-offset stack access, min out of bound")
+__failure __msg("invalid variable-offset read from stack R2")
+__naked void access_min_out_of_bound(void)
+{
+ asm volatile (" \
+ /* Fill the top 8 bytes of the stack */ \
+ r2 = 0; \
+ *(u64*)(r10 - 8) = r2; \
+ /* Get an unknown value */ \
+ r2 = *(u32*)(r1 + 0); \
+ /* Make it small and 4-byte aligned */ \
+ r2 &= 4; \
+ r2 -= 516; \
+ /* add it to fp. We now have either fp-516 or fp-512, but\
+ * we don't know which \
+ */ \
+ r2 += r10; \
+ /* dereference it indirectly */ \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("indirect variable-offset stack access, min_off < min_initialized")
+__success
+__failure_unpriv __msg_unpriv("R2 variable stack access prohibited for !root")
+__naked void access_min_off_min_initialized(void)
+{
+ asm volatile (" \
+ /* Fill only the top 8 bytes of the stack. */ \
+ r2 = 0; \
+ *(u64*)(r10 - 8) = r2; \
+ /* Get an unknown value */ \
+ r2 = *(u32*)(r1 + 0); \
+ /* Make it small and 4-byte aligned. */ \
+ r2 &= 4; \
+ r2 -= 16; \
+ /* Add it to fp. We now have either fp-12 or fp-16, but we don't know\
+ * which. fp-16 size 8 is partially uninitialized stack.\
+ */ \
+ r2 += r10; \
+ /* Dereference it indirectly. */ \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("cgroup/skb")
+__description("indirect variable-offset stack access, priv vs unpriv")
+__success __failure_unpriv
+__msg_unpriv("R2 variable stack access prohibited for !root")
+__retval(0)
+__naked void stack_access_priv_vs_unpriv(void)
+{
+ asm volatile (" \
+ /* Fill the top 16 bytes of the stack. */ \
+ r2 = 0; \
+ *(u64*)(r10 - 16) = r2; \
+ r2 = 0; \
+ *(u64*)(r10 - 8) = r2; \
+ /* Get an unknown value. */ \
+ r2 = *(u32*)(r1 + 0); \
+ /* Make it small and 4-byte aligned. */ \
+ r2 &= 4; \
+ r2 -= 16; \
+ /* Add it to fp. We now have either fp-12 or fp-16, we don't know\
+ * which, but either way it points to initialized stack.\
+ */ \
+ r2 += r10; \
+ /* Dereference it indirectly. */ \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("lwt_in")
+__description("indirect variable-offset stack access, ok")
+__success __retval(0)
+__naked void variable_offset_stack_access_ok(void)
+{
+ asm volatile (" \
+ /* Fill the top 16 bytes of the stack. */ \
+ r2 = 0; \
+ *(u64*)(r10 - 16) = r2; \
+ r2 = 0; \
+ *(u64*)(r10 - 8) = r2; \
+ /* Get an unknown value. */ \
+ r2 = *(u32*)(r1 + 0); \
+ /* Make it small and 4-byte aligned. */ \
+ r2 &= 4; \
+ r2 -= 16; \
+ /* Add it to fp. We now have either fp-12 or fp-16, we don't know\
+ * which, but either way it points to initialized stack.\
+ */ \
+ r2 += r10; \
+ /* Dereference it indirectly. */ \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
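
As an aside (not part of the patch itself), a rough C analogue of the variable-offset stack reads the file above tests; whether clang actually emits a variable-offset stack access for this depends on optimization, which is why the tests fix the exact instruction sequence in asm. The function name var_off_sketch is made up.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("socket")
int var_off_sketch(struct __sk_buff *skb)
{
	volatile __u64 buf[2];		/* 16-byte stack buffer */
	__u32 idx = skb->len & 1;	/* unknown scalar, bounded to {0, 1} */

	buf[0] = skb->len;		/* initialize the whole region first */
	buf[1] = skb->priority;
	/* The indexed read lands at one of two stack slots; the verifier
	 * accepts it for privileged loaders and rejects it for unprivileged
	 * ones, as the tests above expect.
	 */
	return buf[idx] ? 1 : 0;
}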
diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c b/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c
new file mode 100644
index 000000000000..3e2d76ee8050
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC. */
+
+#include <vmlinux.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+static char buf[64];
+
+SEC("lsm.s/file_open")
+__success
+int BPF_PROG(get_task_exe_file_and_put_kfunc_from_current_sleepable)
+{
+ struct file *acquired;
+
+ acquired = bpf_get_task_exe_file(bpf_get_current_task_btf());
+ if (!acquired)
+ return 0;
+
+ bpf_put_file(acquired);
+ return 0;
+}
+
+SEC("lsm/file_open")
+__success
+int BPF_PROG(get_task_exe_file_and_put_kfunc_from_current_non_sleepable, struct file *file)
+{
+ struct file *acquired;
+
+ acquired = bpf_get_task_exe_file(bpf_get_current_task_btf());
+ if (!acquired)
+ return 0;
+
+ bpf_put_file(acquired);
+ return 0;
+}
+
+SEC("lsm.s/task_alloc")
+__success
+int BPF_PROG(get_task_exe_file_and_put_kfunc_from_argument,
+ struct task_struct *task)
+{
+ struct file *acquired;
+
+ acquired = bpf_get_task_exe_file(task);
+ if (!acquired)
+ return 0;
+
+ bpf_put_file(acquired);
+ return 0;
+}
+
+SEC("lsm.s/inode_getattr")
+__success
+int BPF_PROG(path_d_path_from_path_argument, struct path *path)
+{
+ int ret;
+
+ ret = bpf_path_d_path(path, buf, sizeof(buf));
+ __sink(ret);
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__success
+int BPF_PROG(path_d_path_from_file_argument, struct file *file)
+{
+ int ret;
+ struct path *path;
+
+ /* The f_path member is a path which is embedded directly within a
+ * file. Therefore, a pointer to such an embedded member is still
+ * recognized by the BPF verifier as being PTR_TRUSTED, as it's
+ * essentially PTR_TRUSTED w/ a non-zero fixed offset.
+ */
+ path = &file->f_path;
+ ret = bpf_path_d_path(path, buf, sizeof(buf));
+ __sink(ret);
+ return 0;
+}
+
+SEC("lsm.s/inode_rename")
+__success
+int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ struct inode *inode = new_dentry->d_inode;
+ ino_t ino;
+
+ if (!inode)
+ return 0;
+ ino = inode->i_ino;
+ if (ino == 0)
+ return -EACCES;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
new file mode 100644
index 000000000000..4b392c6c8fc4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC. */
+
+#include <vmlinux.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <linux/limits.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+static char buf[PATH_MAX];
+
+SEC("lsm.s/file_open")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(get_task_exe_file_kfunc_null)
+{
+ struct file *acquired;
+
+ /* Can't pass a NULL pointer to bpf_get_task_exe_file(). */
+ acquired = bpf_get_task_exe_file(NULL);
+ if (!acquired)
+ return 0;
+
+ bpf_put_file(acquired);
+ return 0;
+}
+
+SEC("lsm.s/inode_getxattr")
+__failure __msg("arg#0 pointer type STRUCT task_struct must point to scalar, or struct with scalar")
+int BPF_PROG(get_task_exe_file_kfunc_fp)
+{
+ u64 x;
+ struct file *acquired;
+ struct task_struct *task;
+
+ task = (struct task_struct *)&x;
+ /* Can't pass random frame pointer to bpf_get_task_exe_file(). */
+ acquired = bpf_get_task_exe_file(task);
+ if (!acquired)
+ return 0;
+
+ bpf_put_file(acquired);
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("R1 must be referenced or trusted")
+int BPF_PROG(get_task_exe_file_kfunc_untrusted)
+{
+ struct file *acquired;
+ struct task_struct *parent;
+
+ /* Walking a trusted struct task_struct returned from
+ * bpf_get_current_task_btf() yields an untrusted pointer.
+ */
+ parent = bpf_get_current_task_btf()->parent;
+ /* Can't pass untrusted pointer to bpf_get_task_exe_file(). */
+ acquired = bpf_get_task_exe_file(parent);
+ if (!acquired)
+ return 0;
+
+ bpf_put_file(acquired);
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("Unreleased reference")
+int BPF_PROG(get_task_exe_file_kfunc_unreleased)
+{
+ struct file *acquired;
+
+ acquired = bpf_get_task_exe_file(bpf_get_current_task_btf());
+ if (!acquired)
+ return 0;
+
+ /* Acquired but never released. */
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("release kernel function bpf_put_file expects")
+int BPF_PROG(put_file_kfunc_unacquired, struct file *file)
+{
+ /* Can't release an unacquired pointer. */
+ bpf_put_file(file);
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(path_d_path_kfunc_null)
+{
+ /* Can't pass NULL value to bpf_path_d_path() kfunc. */
+ bpf_path_d_path(NULL, buf, sizeof(buf));
+ return 0;
+}
+
+SEC("lsm.s/task_alloc")
+__failure __msg("R1 must be referenced or trusted")
+int BPF_PROG(path_d_path_kfunc_untrusted_from_argument, struct task_struct *task)
+{
+ struct path *root;
+
+ /* Walking a trusted argument typically yields an untrusted
+ * pointer. This is one example of that.
+ */
+ root = &task->fs->root;
+ bpf_path_d_path(root, buf, sizeof(buf));
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("R1 must be referenced or trusted")
+int BPF_PROG(path_d_path_kfunc_untrusted_from_current)
+{
+ struct path *pwd;
+ struct task_struct *current;
+
+ current = bpf_get_current_task_btf();
+ /* Walking a trusted pointer returned from bpf_get_current_task_btf()
+ * yields an untrusted pointer.
+ */
+ pwd = &current->fs->pwd;
+ bpf_path_d_path(pwd, buf, sizeof(buf));
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("kernel function bpf_path_d_path args#0 expected pointer to STRUCT path but R1 has a pointer to STRUCT file")
+int BPF_PROG(path_d_path_kfunc_type_mismatch, struct file *file)
+{
+ bpf_path_d_path((struct path *)&file->f_task_work, buf, sizeof(buf));
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("invalid access to map value, value_size=4096 off=0 size=8192")
+int BPF_PROG(path_d_path_kfunc_invalid_buf_sz, struct file *file)
+{
+ /* bpf_path_d_path() enforces a constraint on the buffer size supplied
+ * by the BPF LSM program via the __sz annotation. buf here is set to
+ * PATH_MAX, so let's ensure that the BPF verifier rejects BPF_PROG_LOAD
+ * attempts if the supplied size and the actual size of the buffer
+ * mismatches.
+ */
+ bpf_path_d_path(&file->f_path, buf, PATH_MAX * 2);
+ return 0;
+}
+
+SEC("fentry/vfs_open")
+__failure __msg("calling kernel function bpf_path_d_path is not allowed")
+int BPF_PROG(path_d_path_kfunc_non_lsm, struct path *path, struct file *f)
+{
+ /* Calling bpf_path_d_path() from a non-LSM BPF program isn't permitted.
+ */
+ bpf_path_d_path(path, buf, sizeof(buf));
+ return 0;
+}
+
+SEC("lsm.s/inode_rename")
+__failure __msg("invalid mem access 'trusted_ptr_or_null_'")
+int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ struct inode *inode = new_dentry->d_inode;
+ ino_t ino;
+
+ ino = inode->i_ino;
+ if (ino == 0)
+ return -EACCES;
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_xadd.c b/tools/testing/selftests/bpf/progs/verifier_xadd.c
new file mode 100644
index 000000000000..05a0a55adb45
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_xadd.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/xadd.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
+SEC("tc")
+__description("xadd/w check unaligned stack")
+__failure __msg("misaligned stack access off")
+__naked void xadd_w_check_unaligned_stack(void)
+{
+ asm volatile (" \
+ r0 = 1; \
+ *(u64*)(r10 - 8) = r0; \
+ lock *(u32 *)(r10 - 7) += w0; \
+ r0 = *(u64*)(r10 - 8); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("xadd/w check unaligned map")
+__failure __msg("misaligned value access off")
+__naked void xadd_w_check_unaligned_map(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: r1 = 1; \
+ lock *(u32 *)(r0 + 3) += w1; \
+ r0 = *(u32*)(r0 + 3); \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("xadd/w check unaligned pkt")
+__failure __msg("BPF_ATOMIC stores into R2 pkt is not allowed")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void xadd_w_check_unaligned_pkt(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 < r3 goto l0_%=; \
+ r0 = 99; \
+ goto l1_%=; \
+l0_%=: r0 = 1; \
+ r1 = 0; \
+ *(u32*)(r2 + 0) = r1; \
+ r1 = 0; \
+ *(u32*)(r2 + 3) = r1; \
+ lock *(u32 *)(r2 + 1) += w0; \
+ lock *(u32 *)(r2 + 2) += w0; \
+ r0 = *(u32*)(r2 + 1); \
+l1_%=: exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("xadd/w check whether src/dst got mangled, 1")
+__success __retval(3)
+__naked void src_dst_got_mangled_1(void)
+{
+ asm volatile (" \
+ r0 = 1; \
+ r6 = r0; \
+ r7 = r10; \
+ *(u64*)(r10 - 8) = r0; \
+ lock *(u64 *)(r10 - 8) += r0; \
+ lock *(u64 *)(r10 - 8) += r0; \
+ if r6 != r0 goto l0_%=; \
+ if r7 != r10 goto l0_%=; \
+ r0 = *(u64*)(r10 - 8); \
+ exit; \
+l0_%=: r0 = 42; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__description("xadd/w check whether src/dst got mangled, 2")
+__success __retval(3)
+__naked void src_dst_got_mangled_2(void)
+{
+ asm volatile (" \
+ r0 = 1; \
+ r6 = r0; \
+ r7 = r10; \
+ *(u32*)(r10 - 8) = r0; \
+ lock *(u32 *)(r10 - 8) += w0; \
+ lock *(u32 *)(r10 - 8) += w0; \
+ if r6 != r0 goto l0_%=; \
+ if r7 != r10 goto l0_%=; \
+ r0 = *(u32*)(r10 - 8); \
+ exit; \
+l0_%=: r0 = 42; \
+ exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
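
As an aside (not part of the patch itself), the C-level form that clang compiles down to the atomic add ("xadd") instruction exercised above; the natural 8-byte alignment of the counter is exactly what the misalignment tests check for. The function name xadd_sketch is made up.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

static __u64 counter;				/* global data, 8-byte aligned */

SEC("tc")
int xadd_sketch(struct __sk_buff *ctx)
{
	__sync_fetch_and_add(&counter, 1);	/* emits a BPF atomic add insn */
	return 0;
}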
diff --git a/tools/testing/selftests/bpf/progs/verifier_xdp.c b/tools/testing/selftests/bpf/progs/verifier_xdp.c
new file mode 100644
index 000000000000..50768ed179b3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_xdp.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/xdp.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("xdp")
+__description("XDP, using ifindex from netdev")
+__success __retval(1)
+__naked void xdp_using_ifindex_from_netdev(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ r2 = *(u32*)(r1 + %[xdp_md_ingress_ifindex]); \
+ if r2 < 1 goto l0_%=; \
+ r0 = 1; \
+l0_%=: exit; \
+" :
+ : __imm_const(xdp_md_ingress_ifindex, offsetof(struct xdp_md, ingress_ifindex))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
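
As an aside (not part of the patch itself), the canonical C-level bounds check that the pkt_data/pkt_end comparison tests in the next file exercise instruction by instruction; the function name xdp_parse_eth_sketch is made up.

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_parse_eth_sketch(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;

	/* Without this comparison against data_end, the verifier rejects the
	 * loads below because the offset may be outside of the packet.
	 */
	if ((void *)(eth + 1) > data_end)
		return XDP_DROP;

	return eth->h_proto == bpf_htons(ETH_P_IP) ? XDP_PASS : XDP_DROP;
}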
diff --git a/tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c
new file mode 100644
index 000000000000..df2dfd1b15d1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c
@@ -0,0 +1,1722 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end mangling, bad access 1")
+__failure __msg("R3 pointer arithmetic on pkt_end")
+__naked void end_mangling_bad_access_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ r3 += 8; \
+ if r1 > r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end mangling, bad access 2")
+__failure __msg("R3 pointer arithmetic on pkt_end")
+__naked void end_mangling_bad_access_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ r3 -= 8; \
+ if r1 > r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' > pkt_end, corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void end_corner_case_good_access_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 > r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' > pkt_end, bad access 1")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_end_bad_access_1_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 > r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 4); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' > pkt_end, bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_end_bad_access_2_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 > r3 goto l0_%=; \
+l0_%=: r0 = *(u64*)(r1 - 8); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' > pkt_end, corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 9; \
+ if r1 > r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 9); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r1 > r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 7); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end > pkt_data', good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void end_pkt_data_good_access_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 > r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u32*)(r1 - 5); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end > pkt_data', corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 6; \
+ if r3 > r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 6); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end > pkt_data', bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_data_bad_access_2_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 > r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end > pkt_data', corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void data_corner_case_good_access_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r3 > r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 7); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end > pkt_data', corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 > r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 8); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' < pkt_end, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void data_pkt_end_good_access_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 < r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u32*)(r1 - 5); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_3(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 6; \
+ if r1 < r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 6); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' < pkt_end, bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_end_bad_access_2_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 < r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' < pkt_end, corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void end_corner_case_good_access_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r1 < r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 7); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' < pkt_end, corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_3(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 < r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 8); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end < pkt_data', corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void data_corner_case_good_access_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 < r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end < pkt_data', bad access 1")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_data_bad_access_1_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 < r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 4); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end < pkt_data', bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_data_bad_access_2_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 < r1 goto l0_%=; \
+l0_%=: r0 = *(u64*)(r1 - 8); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end < pkt_data', corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_4(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 9; \
+ if r3 < r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 9); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end < pkt_data', corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_4(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r3 < r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 7); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' >= pkt_end, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void data_pkt_end_good_access_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 >= r3 goto l0_%=; \
+ r0 = *(u32*)(r1 - 5); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_5(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 6; \
+ if r1 >= r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 6); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' >= pkt_end, bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_end_bad_access_2_3(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 >= r3 goto l0_%=; \
+l0_%=: r0 = *(u32*)(r1 - 5); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' >= pkt_end, corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void end_corner_case_good_access_3(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r1 >= r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 7); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_5(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 >= r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end >= pkt_data', corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void data_corner_case_good_access_3(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 >= r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 8); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end >= pkt_data', bad access 1")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_data_bad_access_1_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 >= r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 4); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end >= pkt_data', bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_data_bad_access_2_3(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 >= r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end >= pkt_data', corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_6(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 9; \
+ if r3 >= r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 9); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_6(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r3 >= r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 7); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' <= pkt_end, corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void end_corner_case_good_access_4(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 <= r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 8); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' <= pkt_end, bad access 1")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_end_bad_access_1_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 <= r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 4); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' <= pkt_end, bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_end_bad_access_2_4(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 <= r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_7(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 9; \
+ if r1 <= r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 9); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_7(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r1 <= r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 7); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end <= pkt_data', good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void end_pkt_data_good_access_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 <= r1 goto l0_%=; \
+ r0 = *(u32*)(r1 - 5); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_8(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 6; \
+ if r3 <= r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 6); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end <= pkt_data', bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_data_bad_access_2_4(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 <= r1 goto l0_%=; \
+l0_%=: r0 = *(u32*)(r1 - 5); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end <= pkt_data', corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void data_corner_case_good_access_4(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r3 <= r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 7); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_end <= pkt_data', corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_8(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 <= r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' > pkt_data, corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void data_corner_case_good_access_5(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 > r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' > pkt_data, bad access 1")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_data_bad_access_1_3(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 > r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 4); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' > pkt_data, bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_data_bad_access_2_5(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 > r3 goto l0_%=; \
+l0_%=: r0 = *(u64*)(r1 - 8); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_9(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 9; \
+ if r1 > r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 9); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_9(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r1 > r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 7); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data > pkt_meta', good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void data_pkt_meta_good_access_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 > r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u32*)(r1 - 5); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_10(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 6; \
+ if r3 > r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 6); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data > pkt_meta', bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_meta_bad_access_2_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 > r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data > pkt_meta', corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void meta_corner_case_good_access_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r3 > r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 7); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data > pkt_meta', corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_10(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 > r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 8); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' < pkt_data, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void meta_pkt_data_good_access_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 < r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u32*)(r1 - 5); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_11(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 6; \
+ if r1 < r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 6); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' < pkt_data, bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_data_bad_access_2_6(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 < r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' < pkt_data, corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void data_corner_case_good_access_6(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r1 < r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 7); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_11(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 < r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 8); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data < pkt_meta', corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void meta_corner_case_good_access_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 < r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data < pkt_meta', bad access 1")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_meta_bad_access_1_1(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 < r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 4); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data < pkt_meta', bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_meta_bad_access_2_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 < r1 goto l0_%=; \
+l0_%=: r0 = *(u64*)(r1 - 8); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data < pkt_meta', corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_12(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 9; \
+ if r3 < r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 9); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_12(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r3 < r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 7); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' >= pkt_data, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void meta_pkt_data_good_access_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 >= r3 goto l0_%=; \
+ r0 = *(u32*)(r1 - 5); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_13(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 6; \
+ if r1 >= r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 6); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' >= pkt_data, bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_data_bad_access_2_7(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 >= r3 goto l0_%=; \
+l0_%=: r0 = *(u32*)(r1 - 5); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' >= pkt_data, corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void data_corner_case_good_access_7(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r1 >= r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 7); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_13(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 >= r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data >= pkt_meta', corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void meta_corner_case_good_access_3(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 >= r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 8); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data >= pkt_meta', bad access 1")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_meta_bad_access_1_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 >= r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 4); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data >= pkt_meta', bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_meta_bad_access_2_3(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 >= r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_14(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 9; \
+ if r3 >= r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 9); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_14(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r3 >= r1 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 7); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' <= pkt_data, corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void data_corner_case_good_access_8(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 <= r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 8); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' <= pkt_data, bad access 1")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_data_bad_access_1_4(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 <= r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 4); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' <= pkt_data, bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_data_bad_access_2_8(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r1 <= r3 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_15(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 9; \
+ if r1 <= r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 9); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_15(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r1 <= r3 goto l0_%=; \
+ goto l1_%=; \
+l0_%=: r0 = *(u64*)(r1 - 7); \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data <= pkt_meta', good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void data_pkt_meta_good_access_2(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 <= r1 goto l0_%=; \
+ r0 = *(u32*)(r1 - 5); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_16(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 6; \
+ if r3 <= r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 6); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data <= pkt_meta', bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_meta_bad_access_2_4(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 <= r1 goto l0_%=; \
+l0_%=: r0 = *(u32*)(r1 - 5); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data <= pkt_meta', corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void meta_corner_case_good_access_4(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 7; \
+ if r3 <= r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 7); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_16(void)
+{
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
+ r3 = *(u32*)(r1 + %[xdp_md_data]); \
+ r1 = r2; \
+ r1 += 8; \
+ if r3 <= r1 goto l0_%=; \
+ r0 = *(u64*)(r1 - 8); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/vrf_socket_lookup.c b/tools/testing/selftests/bpf/progs/vrf_socket_lookup.c
new file mode 100644
index 000000000000..bcfb6feb38c0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/vrf_socket_lookup.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/pkt_cls.h>
+#include <stdbool.h>
+
+int lookup_status;
+bool test_xdp;
+bool tcp_skc;
+
+#define CUR_NS BPF_F_CURRENT_NETNS
+
+static void socket_lookup(void *ctx, void *data_end, void *data)
+{
+ struct ethhdr *eth = data;
+ struct bpf_sock_tuple *tp;
+ struct bpf_sock *sk;
+ struct iphdr *iph;
+ int tplen;
+
+ if (eth + 1 > data_end)
+ return;
+
+ if (eth->h_proto != bpf_htons(ETH_P_IP))
+ return;
+
+ iph = (struct iphdr *)(eth + 1);
+ if (iph + 1 > data_end)
+ return;
+
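+ /* The IPv4 saddr/daddr at the tail of the IP header, followed by the
+ * transport source/dest ports, match the layout of bpf_sock_tuple.ipv4,
+ * so the tuple can point directly into the packet.
+ */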
+ tp = (struct bpf_sock_tuple *)&iph->saddr;
+ tplen = sizeof(tp->ipv4);
+ if ((void *)tp + tplen > data_end)
+ return;
+
+ switch (iph->protocol) {
+ case IPPROTO_TCP:
+ if (tcp_skc)
+ sk = bpf_skc_lookup_tcp(ctx, tp, tplen, CUR_NS, 0);
+ else
+ sk = bpf_sk_lookup_tcp(ctx, tp, tplen, CUR_NS, 0);
+ break;
+ case IPPROTO_UDP:
+ sk = bpf_sk_lookup_udp(ctx, tp, tplen, CUR_NS, 0);
+ break;
+ default:
+ return;
+ }
+
+ lookup_status = 0;
+
+ if (sk) {
+ bpf_sk_release(sk);
+ lookup_status = 1;
+ }
+}
+
+SEC("tc")
+int tc_socket_lookup(struct __sk_buff *skb)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+
+ if (test_xdp)
+ return TC_ACT_UNSPEC;
+
+ socket_lookup(skb, data_end, data);
+ return TC_ACT_UNSPEC;
+}
+
+SEC("xdp")
+int xdp_socket_lookup(struct xdp_md *xdp)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+
+ if (!test_xdp)
+ return XDP_PASS;
+
+ socket_lookup(xdp, data_end, data);
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/wq.c b/tools/testing/selftests/bpf/progs/wq.c
new file mode 100644
index 000000000000..2f1ba08c293e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/wq.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Benjamin Tissoires
+ */
+
+#include "bpf_experimental.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct hmap_elem {
+ int counter;
+ struct bpf_timer timer; /* unused */
+ struct bpf_spin_lock lock; /* unused */
+ struct bpf_wq work;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1000);
+ __type(key, int);
+ __type(value, struct hmap_elem);
+} hmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __uint(max_entries, 1000);
+ __type(key, int);
+ __type(value, struct hmap_elem);
+} hmap_malloc SEC(".maps");
+
+struct elem {
+ int ok_offset;
+ struct bpf_wq w;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, int);
+ __type(value, struct elem);
+} array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, 4);
+ __type(key, int);
+ __type(value, struct elem);
+} lru SEC(".maps");
+
+__u32 ok;
+__u32 ok_sleepable;
+
+static int test_elem_callback(void *map, int *key,
+ int (callback_fn)(void *map, int *key, void *value))
+{
+ struct elem init = {}, *val;
+ struct bpf_wq *wq;
+
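+ /* Return -EINVAL if the callback for this key already ran. */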
+ if ((ok & (1 << *key) ||
+ (ok_sleepable & (1 << *key))))
+ return -22;
+
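+ /* Array entries always exist; the LRU hash needs an explicit insert first. */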
+ if (map == &lru &&
+ bpf_map_update_elem(map, key, &init, 0))
+ return -1;
+
+ val = bpf_map_lookup_elem(map, key);
+ if (!val)
+ return -2;
+
+ val->ok_offset = *key;
+
+ wq = &val->w;
+ if (bpf_wq_init(wq, map, 0) != 0)
+ return -3;
+
+ if (bpf_wq_set_callback(wq, callback_fn, 0))
+ return -4;
+
+ if (bpf_wq_start(wq, 0))
+ return -5;
+
+ return 0;
+}
+
+static int test_hmap_elem_callback(void *map, int *key,
+ int (callback_fn)(void *map, int *key, void *value))
+{
+ struct hmap_elem init = {}, *val;
+ struct bpf_wq *wq;
+
+ if ((ok & (1 << *key) ||
+ (ok_sleepable & (1 << *key))))
+ return -22;
+
+ if (bpf_map_update_elem(map, key, &init, 0))
+ return -1;
+
+ val = bpf_map_lookup_elem(map, key);
+ if (!val)
+ return -2;
+
+ wq = &val->work;
+ if (bpf_wq_init(wq, map, 0) != 0)
+ return -3;
+
+ if (bpf_wq_set_callback(wq, callback_fn, 0))
+ return -4;
+
+ if (bpf_wq_start(wq, 0))
+ return -5;
+
+ return 0;
+}
+
+/* callback for a non-sleepable workqueue */
+static int wq_callback(void *map, int *key, void *value)
+{
+ bpf_kfunc_common_test();
+ ok |= (1 << *key);
+ return 0;
+}
+
+/* callback for sleepable workqueue */
+static int wq_cb_sleepable(void *map, int *key, void *value)
+{
+ struct elem *data = (struct elem *)value;
+ int offset = data->ok_offset;
+
+ if (*key != offset)
+ return 0;
+
+ bpf_kfunc_call_test_sleepable();
+ ok_sleepable |= (1 << offset);
+ return 0;
+}
+
+SEC("tc")
+/* test that workqueues can be used from an array */
+__retval(0)
+long test_call_array_sleepable(void *ctx)
+{
+ int key = 0;
+
+ return test_elem_callback(&array, &key, wq_cb_sleepable);
+}
+
+SEC("syscall")
+/* Same test as above but from a sleepable context. */
+__retval(0)
+long test_syscall_array_sleepable(void *ctx)
+{
+ int key = 1;
+
+ return test_elem_callback(&array, &key, wq_cb_sleepable);
+}
+
+SEC("tc")
+/* test that workqueues can be used from a hashmap */
+__retval(0)
+long test_call_hash_sleepable(void *ctx)
+{
+ int key = 2;
+
+ return test_hmap_elem_callback(&hmap, &key, wq_callback);
+}
+
+SEC("tc")
+/* test that workqueues can be used from a hashmap with NO_PREALLOC. */
+__retval(0)
+long test_call_hash_malloc_sleepable(void *ctx)
+{
+ int key = 3;
+
+ return test_hmap_elem_callback(&hmap_malloc, &key, wq_callback);
+}
+
+SEC("tc")
+/* test that workqueues can be used from an LRU map */
+__retval(0)
+long test_call_lru_sleepable(void *ctx)
+{
+ int key = 4;
+
+ return test_elem_callback(&lru, &key, wq_callback);
+}
diff --git a/tools/testing/selftests/bpf/progs/wq_failures.c b/tools/testing/selftests/bpf/progs/wq_failures.c
new file mode 100644
index 000000000000..4240211a1900
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/wq_failures.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Benjamin Tissoires
+ */
+
+#include "bpf_experimental.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct elem {
+ struct bpf_wq w;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, int);
+ __type(value, struct elem);
+} array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, 4);
+ __type(key, int);
+ __type(value, struct elem);
+} lru SEC(".maps");
+
+/* callback for a non-sleepable workqueue */
+static int wq_callback(void *map, int *key, void *value)
+{
+ bpf_kfunc_common_test();
+ return 0;
+}
+
+/* callback for sleepable workqueue */
+static int wq_cb_sleepable(void *map, int *key, void *value)
+{
+ bpf_kfunc_call_test_sleepable();
+ return 0;
+}
+
+SEC("tc")
+/* test that bpf_wq_init requires a map pointer as its second argument
+ */
+__log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure
+__msg(": (85) call bpf_wq_init#") /* anchor message */
+__msg("pointer in R2 isn't map pointer")
+long test_wq_init_nomap(void *ctx)
+{
+ struct bpf_wq *wq;
+ struct elem *val;
+ int key = 0;
+
+ val = bpf_map_lookup_elem(&array, &key);
+ if (!val)
+ return -1;
+
+ wq = &val->w;
+ if (bpf_wq_init(wq, &key, 0) != 0)
+ return -3;
+
+ return 0;
+}
+
+SEC("tc")
+/* test that the workqueue passed to bpf_wq_init belongs to the map
+ * given as its second argument
+ */
+__log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure
+__msg(": (85) call bpf_wq_init#") /* anchor message */
+__msg("workqueue pointer in R1 map_uid=0 doesn't match map pointer in R2 map_uid=0")
+long test_wq_init_wrong_map(void *ctx)
+{
+ struct bpf_wq *wq;
+ struct elem *val;
+ int key = 0;
+
+ val = bpf_map_lookup_elem(&array, &key);
+ if (!val)
+ return -1;
+
+ wq = &val->w;
+ if (bpf_wq_init(wq, &lru, 0) != 0)
+ return -3;
+
+ return 0;
+}
+
+SEC("?tc")
+__log_level(2)
+__failure
+/* check that the first argument of bpf_wq_set_callback()
+ * is a correct bpf_wq pointer.
+ */
+__msg(": (85) call bpf_wq_set_callback_impl#") /* anchor message */
+__msg("arg#0 doesn't point to a map value")
+long test_wrong_wq_pointer(void *ctx)
+{
+ int key = 0;
+ struct bpf_wq *wq;
+
+ wq = bpf_map_lookup_elem(&array, &key);
+ if (!wq)
+ return 1;
+
+ if (bpf_wq_init(wq, &array, 0))
+ return 2;
+
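+ /* &wq points at the stack variable, not at the map value, so the
+ * verifier must reject it.
+ */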
+ if (bpf_wq_set_callback((void *)&wq, wq_callback, 0))
+ return 3;
+
+ return -22;
+}
+
+SEC("?tc")
+__log_level(2)
+__failure
+/* check that the first argument of bpf_wq_set_callback()
+ * is a correct bpf_wq pointer.
+ */
+__msg(": (85) call bpf_wq_set_callback_impl#") /* anchor message */
+__msg("off 1 doesn't point to 'struct bpf_wq' that is at 0")
+long test_wrong_wq_pointer_offset(void *ctx)
+{
+ int key = 0;
+ struct bpf_wq *wq;
+
+ wq = bpf_map_lookup_elem(&array, &key);
+ if (!wq)
+ return 1;
+
+ if (bpf_wq_init(wq, &array, 0))
+ return 2;
+
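+ /* One byte past the bpf_wq offset in the map value; the verifier
+ * requires the exact offset.
+ */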
+ if (bpf_wq_set_callback((void *)wq + 1, wq_cb_sleepable, 0))
+ return 3;
+
+ return -22;
+}
diff --git a/tools/testing/selftests/bpf/progs/xdp_dummy.c b/tools/testing/selftests/bpf/progs/xdp_dummy.c
index ea25e8881992..d988b2e0cee8 100644
--- a/tools/testing/selftests/bpf/progs/xdp_dummy.c
+++ b/tools/testing/selftests/bpf/progs/xdp_dummy.c
@@ -4,7 +4,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-SEC("xdp_dummy")
+SEC("xdp")
int xdp_dummy_prog(struct xdp_md *ctx)
{
return XDP_PASS;
diff --git a/tools/testing/selftests/bpf/progs/xdp_features.c b/tools/testing/selftests/bpf/progs/xdp_features.c
new file mode 100644
index 000000000000..67424084a38a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_features.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <linux/netdev.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_tracing.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/udp.h>
+#include <asm-generic/errno-base.h>
+
+#include "xdp_features.h"
+
+#define ipv6_addr_equal(a, b) ((a).s6_addr32[0] == (b).s6_addr32[0] && \
+ (a).s6_addr32[1] == (b).s6_addr32[1] && \
+ (a).s6_addr32[2] == (b).s6_addr32[2] && \
+ (a).s6_addr32[3] == (b).s6_addr32[3])
+
+struct net_device;
+struct bpf_prog;
+
+struct xdp_cpumap_stats {
+ unsigned int redirect;
+ unsigned int pass;
+ unsigned int drop;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1);
+} stats SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1);
+} dut_stats SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CPUMAP);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_cpumap_val));
+ __uint(max_entries, 1);
+} cpu_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_devmap_val));
+ __uint(max_entries, 1);
+} dev_map SEC(".maps");
+
+const volatile struct in6_addr tester_addr;
+const volatile struct in6_addr dut_addr;
+
+static __always_inline int
+xdp_process_echo_packet(struct xdp_md *xdp, bool dut)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eh = data;
+ struct tlv_hdr *tlv;
+ struct udphdr *uh;
+ __be16 port;
+
+ if (eh + 1 > (struct ethhdr *)data_end)
+ return -EINVAL;
+
+ if (eh->h_proto == bpf_htons(ETH_P_IP)) {
+ struct iphdr *ih = (struct iphdr *)(eh + 1);
+ __be32 saddr = dut ? tester_addr.s6_addr32[3]
+ : dut_addr.s6_addr32[3];
+ __be32 daddr = dut ? dut_addr.s6_addr32[3]
+ : tester_addr.s6_addr32[3];
+
+ if (ih + 1 > (struct iphdr *)data_end)
+ return -EINVAL;
+
+ if (saddr != ih->saddr)
+ return -EINVAL;
+
+ if (daddr != ih->daddr)
+ return -EINVAL;
+
+ if (ih->protocol != IPPROTO_UDP)
+ return -EINVAL;
+
+ uh = (struct udphdr *)(ih + 1);
+ } else if (eh->h_proto == bpf_htons(ETH_P_IPV6)) {
+ struct in6_addr saddr = dut ? tester_addr : dut_addr;
+ struct in6_addr daddr = dut ? dut_addr : tester_addr;
+ struct ipv6hdr *ih6 = (struct ipv6hdr *)(eh + 1);
+
+ if (ih6 + 1 > (struct ipv6hdr *)data_end)
+ return -EINVAL;
+
+ if (!ipv6_addr_equal(saddr, ih6->saddr))
+ return -EINVAL;
+
+ if (!ipv6_addr_equal(daddr, ih6->daddr))
+ return -EINVAL;
+
+ if (ih6->nexthdr != IPPROTO_UDP)
+ return -EINVAL;
+
+ uh = (struct udphdr *)(ih6 + 1);
+ } else {
+ return -EINVAL;
+ }
+
+ if (uh + 1 > (struct udphdr *)data_end)
+ return -EINVAL;
+
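+ /* Towards the DUT the echo port is the destination; on the return
+ * path it is the source.
+ */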
+ port = dut ? uh->dest : uh->source;
+ if (port != bpf_htons(DUT_ECHO_PORT))
+ return -EINVAL;
+
+ tlv = (struct tlv_hdr *)(uh + 1);
+ if (tlv + 1 > data_end)
+ return -EINVAL;
+
+ return bpf_htons(tlv->type) == CMD_ECHO ? 0 : -EINVAL;
+}
+
+static __always_inline int
+xdp_update_stats(struct xdp_md *xdp, bool tx, bool dut)
+{
+ __u32 *val, key = 0;
+
+ if (xdp_process_echo_packet(xdp, tx))
+ return -EINVAL;
+
+ if (dut)
+ val = bpf_map_lookup_elem(&dut_stats, &key);
+ else
+ val = bpf_map_lookup_elem(&stats, &key);
+
+ if (val)
+ __sync_add_and_fetch(val, 1);
+
+ return 0;
+}
+
+/* Tester */
+
+SEC("xdp")
+int xdp_tester_check_tx(struct xdp_md *xdp)
+{
+ xdp_update_stats(xdp, true, false);
+
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int xdp_tester_check_rx(struct xdp_md *xdp)
+{
+ xdp_update_stats(xdp, false, false);
+
+ return XDP_PASS;
+}
+
+/* DUT */
+
+SEC("xdp")
+int xdp_do_pass(struct xdp_md *xdp)
+{
+ xdp_update_stats(xdp, true, true);
+
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int xdp_do_drop(struct xdp_md *xdp)
+{
+ if (xdp_update_stats(xdp, true, true))
+ return XDP_PASS;
+
+ return XDP_DROP;
+}
+
+SEC("xdp")
+int xdp_do_aborted(struct xdp_md *xdp)
+{
+ if (xdp_process_echo_packet(xdp, true))
+ return XDP_PASS;
+
+ return XDP_ABORTED;
+}
+
+SEC("xdp")
+int xdp_do_tx(struct xdp_md *xdp)
+{
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eh = data;
+ __u8 tmp_mac[ETH_ALEN];
+
+ if (xdp_update_stats(xdp, true, true))
+ return XDP_PASS;
+
+ __builtin_memcpy(tmp_mac, eh->h_source, ETH_ALEN);
+ __builtin_memcpy(eh->h_source, eh->h_dest, ETH_ALEN);
+ __builtin_memcpy(eh->h_dest, tmp_mac, ETH_ALEN);
+
+ return XDP_TX;
+}
+
+SEC("xdp")
+int xdp_do_redirect(struct xdp_md *xdp)
+{
+ if (xdp_process_echo_packet(xdp, true))
+ return XDP_PASS;
+
+ return bpf_redirect_map(&cpu_map, 0, 0);
+}
+
+SEC("tp_btf/xdp_exception")
+int BPF_PROG(xdp_exception, const struct net_device *dev,
+ const struct bpf_prog *xdp, __u32 act)
+{
+ __u32 *val, key = 0;
+
+ val = bpf_map_lookup_elem(&dut_stats, &key);
+ if (val)
+ __sync_add_and_fetch(val, 1);
+
+ return 0;
+}
+
+SEC("tp_btf/xdp_cpumap_kthread")
+int BPF_PROG(tp_xdp_cpumap_kthread, int map_id, unsigned int processed,
+ unsigned int drops, int sched, struct xdp_cpumap_stats *xdp_stats)
+{
+ __u32 *val, key = 0;
+
+ val = bpf_map_lookup_elem(&dut_stats, &key);
+ if (val)
+ __sync_add_and_fetch(val, 1);
+
+ return 0;
+}
+
+SEC("xdp/cpumap")
+int xdp_do_redirect_cpumap(struct xdp_md *xdp)
+{
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eh = data;
+ __u8 tmp_mac[ETH_ALEN];
+
+ if (xdp_process_echo_packet(xdp, true))
+ return XDP_PASS;
+
+ __builtin_memcpy(tmp_mac, eh->h_source, ETH_ALEN);
+ __builtin_memcpy(eh->h_source, eh->h_dest, ETH_ALEN);
+ __builtin_memcpy(eh->h_dest, tmp_mac, ETH_ALEN);
+
+ return bpf_redirect_map(&dev_map, 0, 0);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xdp_flowtable.c b/tools/testing/selftests/bpf/progs/xdp_flowtable.c
new file mode 100644
index 000000000000..7fdc7b23ee74
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_flowtable.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define ETH_P_IP 0x0800
+#define ETH_P_IPV6 0x86dd
+#define IP_MF 0x2000 /* "More Fragments" */
+#define IP_OFFSET 0x1fff /* "Fragment Offset" */
+#define AF_INET 2
+#define AF_INET6 10
+
+struct bpf_flowtable_opts___local {
+ s32 error;
+};
+
+struct flow_offload_tuple_rhash *
+bpf_xdp_flow_lookup(struct xdp_md *, struct bpf_fib_lookup *,
+ struct bpf_flowtable_opts___local *, u32) __ksym;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1);
+} stats SEC(".maps");
+
+static bool xdp_flowtable_offload_check_iphdr(struct iphdr *iph)
+{
+ /* ip fragmented traffic */
+ if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET))
+ return false;
+
+ /* ip options */
+ if (iph->ihl * 4 != sizeof(*iph))
+ return false;
+
+ if (iph->ttl <= 1)
+ return false;
+
+ return true;
+}
+
+static bool xdp_flowtable_offload_check_tcp_state(void *ports, void *data_end,
+ u8 proto)
+{
+ if (proto == IPPROTO_TCP) {
+ struct tcphdr *tcph = ports;
+
+ if (tcph + 1 > data_end)
+ return false;
+
+ if (tcph->fin || tcph->rst)
+ return false;
+ }
+
+ return true;
+}
+
+struct flow_ports___local {
+ __be16 source, dest;
+} __attribute__((preserve_access_index));
+
+SEC("xdp.frags")
+int xdp_flowtable_do_lookup(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ struct bpf_flowtable_opts___local opts = {};
+ struct flow_offload_tuple_rhash *tuplehash;
+ struct bpf_fib_lookup tuple = {
+ .ifindex = ctx->ingress_ifindex,
+ };
+ void *data = (void *)(long)ctx->data;
+ struct ethhdr *eth = data;
+ struct flow_ports___local *ports;
+ __u32 *val, key = 0;
+
+ if (eth + 1 > data_end)
+ return XDP_DROP;
+
+ switch (eth->h_proto) {
+ case bpf_htons(ETH_P_IP): {
+ struct iphdr *iph = data + sizeof(*eth);
+
+ ports = (struct flow_ports___local *)(iph + 1);
+ if (ports + 1 > data_end)
+ return XDP_PASS;
+
+ /* sanity check on ip header */
+ if (!xdp_flowtable_offload_check_iphdr(iph))
+ return XDP_PASS;
+
+ if (!xdp_flowtable_offload_check_tcp_state(ports, data_end,
+ iph->protocol))
+ return XDP_PASS;
+
+ tuple.family = AF_INET;
+ tuple.tos = iph->tos;
+ tuple.l4_protocol = iph->protocol;
+ tuple.tot_len = bpf_ntohs(iph->tot_len);
+ tuple.ipv4_src = iph->saddr;
+ tuple.ipv4_dst = iph->daddr;
+ tuple.sport = ports->source;
+ tuple.dport = ports->dest;
+ break;
+ }
+ case bpf_htons(ETH_P_IPV6): {
+ struct in6_addr *src = (struct in6_addr *)tuple.ipv6_src;
+ struct in6_addr *dst = (struct in6_addr *)tuple.ipv6_dst;
+ struct ipv6hdr *ip6h = data + sizeof(*eth);
+
+ ports = (struct flow_ports___local *)(ip6h + 1);
+ if (ports + 1 > data_end)
+ return XDP_PASS;
+
+ if (ip6h->hop_limit <= 1)
+ return XDP_PASS;
+
+ if (!xdp_flowtable_offload_check_tcp_state(ports, data_end,
+ ip6h->nexthdr))
+ return XDP_PASS;
+
+ tuple.family = AF_INET6;
+ tuple.l4_protocol = ip6h->nexthdr;
+ tuple.tot_len = bpf_ntohs(ip6h->payload_len);
+ *src = ip6h->saddr;
+ *dst = ip6h->daddr;
+ tuple.sport = ports->source;
+ tuple.dport = ports->dest;
+ break;
+ }
+ default:
+ return XDP_PASS;
+ }
+
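+ /* Look the tuple up in the netfilter flowtable and count a hit. */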
+ tuplehash = bpf_xdp_flow_lookup(ctx, &tuple, &opts, sizeof(opts));
+ if (!tuplehash)
+ return XDP_PASS;
+
+ val = bpf_map_lookup_elem(&stats, &key);
+ if (val)
+ __sync_add_and_fetch(val, 1);
+
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
new file mode 100644
index 000000000000..330ece2eabdb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include "xdp_metadata.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_XSKMAP);
+ __uint(max_entries, 256);
+ __type(key, __u32);
+ __type(value, __u32);
+} xsk SEC(".maps");
+
+__u64 pkts_skip = 0;
+__u64 pkts_fail = 0;
+__u64 pkts_redir = 0;
+
+extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
+ __u64 *timestamp) __ksym;
+extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
+ enum xdp_rss_hash_type *rss_type) __ksym;
+extern int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
+ __be16 *vlan_proto,
+ __u16 *vlan_tci) __ksym;
+
+SEC("xdp.frags")
+int rx(struct xdp_md *ctx)
+{
+ void *data, *data_meta, *data_end;
+ struct ipv6hdr *ip6h = NULL;
+ struct udphdr *udp = NULL;
+ struct iphdr *iph = NULL;
+ struct xdp_meta *meta;
+ struct ethhdr *eth;
+ int err;
+
+ data = (void *)(long)ctx->data;
+ data_end = (void *)(long)ctx->data_end;
+ eth = data;
+
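+ /* Skip up to two VLAN tags: an outer 802.1ad/802.1Q header and an
+ * optional inner 802.1Q header.
+ */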
+ if (eth + 1 < data_end && (eth->h_proto == bpf_htons(ETH_P_8021AD) ||
+ eth->h_proto == bpf_htons(ETH_P_8021Q)))
+ eth = (void *)eth + sizeof(struct vlan_hdr);
+
+ if (eth + 1 < data_end && eth->h_proto == bpf_htons(ETH_P_8021Q))
+ eth = (void *)eth + sizeof(struct vlan_hdr);
+
+ if (eth + 1 < data_end) {
+ if (eth->h_proto == bpf_htons(ETH_P_IP)) {
+ iph = (void *)(eth + 1);
+ if (iph + 1 < data_end && iph->protocol == IPPROTO_UDP)
+ udp = (void *)(iph + 1);
+ }
+ if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
+ ip6h = (void *)(eth + 1);
+ if (ip6h + 1 < data_end && ip6h->nexthdr == IPPROTO_UDP)
+ udp = (void *)(ip6h + 1);
+ }
+ if (udp && udp + 1 > data_end)
+ udp = NULL;
+ }
+
+ if (!udp) {
+ __sync_add_and_fetch(&pkts_skip, 1);
+ return XDP_PASS;
+ }
+
+ /* Forwarding UDP:9091 to AF_XDP */
+ if (udp->dest != bpf_htons(9091)) {
+ __sync_add_and_fetch(&pkts_skip, 1);
+ return XDP_PASS;
+ }
+
+ err = bpf_xdp_adjust_meta(ctx, -(int)sizeof(struct xdp_meta));
+ if (err) {
+ __sync_add_and_fetch(&pkts_fail, 1);
+ return XDP_PASS;
+ }
+
+ data = (void *)(long)ctx->data;
+ data_meta = (void *)(long)ctx->data_meta;
+ meta = data_meta;
+
+ if (meta + 1 > data) {
+ __sync_add_and_fetch(&pkts_fail, 1);
+ return XDP_PASS;
+ }
+
+ meta->hint_valid = 0;
+
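+ /* Store a software CLOCK_TAI timestamp next to the HW RX timestamp so
+ * userspace can compare the two.
+ */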
+ meta->xdp_timestamp = bpf_ktime_get_tai_ns();
+ err = bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp);
+ if (err)
+ meta->rx_timestamp_err = err;
+ else
+ meta->hint_valid |= XDP_META_FIELD_TS;
+
+ err = bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash,
+ &meta->rx_hash_type);
+ if (err)
+ meta->rx_hash_err = err;
+ else
+ meta->hint_valid |= XDP_META_FIELD_RSS;
+
+ err = bpf_xdp_metadata_rx_vlan_tag(ctx, &meta->rx_vlan_proto,
+ &meta->rx_vlan_tci);
+ if (err)
+ meta->rx_vlan_tag_err = err;
+ else
+ meta->hint_valid |= XDP_META_FIELD_VLAN_TAG;
+
+ __sync_add_and_fetch(&pkts_redir, 1);
+ return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xdp_metadata.c b/tools/testing/selftests/bpf/progs/xdp_metadata.c
new file mode 100644
index 000000000000..09bb8a038d52
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_metadata.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include "xdp_metadata.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_XSKMAP);
+ __uint(max_entries, 4);
+ __type(key, __u32);
+ __type(value, __u32);
+} xsk SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} prog_arr SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_devmap_val));
+ __uint(max_entries, 1);
+} dev_map SEC(".maps");
+
+extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
+ __u64 *timestamp) __ksym;
+extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
+ enum xdp_rss_hash_type *rss_type) __ksym;
+extern int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
+ __be16 *vlan_proto,
+ __u16 *vlan_tci) __ksym;
+
+SEC("xdp")
+int rx(struct xdp_md *ctx)
+{
+ void *data, *data_meta, *data_end;
+ struct ipv6hdr *ip6h = NULL;
+ struct ethhdr *eth = NULL;
+ struct udphdr *udp = NULL;
+ struct iphdr *iph = NULL;
+ struct xdp_meta *meta;
+ u64 timestamp = -1;
+ int ret;
+
+ data = (void *)(long)ctx->data;
+ data_end = (void *)(long)ctx->data_end;
+ eth = data;
+ if (eth + 1 < data_end) {
+ if (eth->h_proto == bpf_htons(ETH_P_IP)) {
+ iph = (void *)(eth + 1);
+ if (iph + 1 < data_end && iph->protocol == IPPROTO_UDP)
+ udp = (void *)(iph + 1);
+ }
+ if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
+ ip6h = (void *)(eth + 1);
+ if (ip6h + 1 < data_end && ip6h->nexthdr == IPPROTO_UDP)
+ udp = (void *)(ip6h + 1);
+ }
+ if (udp && udp + 1 > data_end)
+ udp = NULL;
+ }
+
+ if (!udp)
+ return XDP_PASS;
+
+ /* Forwarding UDP:8080 to AF_XDP */
+ if (udp->dest != bpf_htons(8080))
+ return XDP_PASS;
+
+ /* Reserve enough for all custom metadata. */
+
+ ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(struct xdp_meta));
+ if (ret != 0)
+ return XDP_DROP;
+
+ data = (void *)(long)ctx->data;
+ data_meta = (void *)(long)ctx->data_meta;
+
+ if (data_meta + sizeof(struct xdp_meta) > data)
+ return XDP_DROP;
+
+ meta = data_meta;
+
+ /* Export metadata. */
+
+ /* We expect veth's bpf_xdp_metadata_rx_timestamp to return a zero HW
+ * timestamp, so put a non-zero value into the AF_XDP frame for
+ * userspace.
+ */
+ bpf_xdp_metadata_rx_timestamp(ctx, &timestamp);
+ if (timestamp == 0)
+ meta->rx_timestamp = 1;
+
+ bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash, &meta->rx_hash_type);
+ bpf_xdp_metadata_rx_vlan_tag(ctx, &meta->rx_vlan_proto,
+ &meta->rx_vlan_tci);
+
+ return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS);
+}
+
+SEC("xdp")
+int redirect(struct xdp_md *ctx)
+{
+ return bpf_redirect_map(&dev_map, ctx->rx_queue_index, XDP_PASS);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xdp_metadata2.c b/tools/testing/selftests/bpf/progs/xdp_metadata2.c
new file mode 100644
index 000000000000..85f88d9d7a78
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_metadata2.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include "xdp_metadata.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
+ enum xdp_rss_hash_type *rss_type) __ksym;
+
+int called;
+
+SEC("freplace/rx")
+int freplace_rx(struct xdp_md *ctx)
+{
+ enum xdp_rss_hash_type type = 0;
+ u32 hash = 0;
+ /* Call _any_ metadata function to make sure we don't crash. */
+ bpf_xdp_metadata_rx_hash(ctx, &hash, &type);
+ called++;
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xdp_redirect_map.c b/tools/testing/selftests/bpf/progs/xdp_redirect_map.c
index d037262c8937..50c8958f94e5 100644
--- a/tools/testing/selftests/bpf/progs/xdp_redirect_map.c
+++ b/tools/testing/selftests/bpf/progs/xdp_redirect_map.c
@@ -1,7 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/if_ether.h>
+
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP);
@@ -10,22 +13,107 @@ struct {
__uint(value_size, sizeof(int));
} tx_port SEC(".maps");
-SEC("redirect_map_0")
+SEC("xdp")
int xdp_redirect_map_0(struct xdp_md *xdp)
{
return bpf_redirect_map(&tx_port, 0, 0);
}
-SEC("redirect_map_1")
+SEC("xdp")
int xdp_redirect_map_1(struct xdp_md *xdp)
{
return bpf_redirect_map(&tx_port, 1, 0);
}
-SEC("redirect_map_2")
+SEC("xdp")
int xdp_redirect_map_2(struct xdp_md *xdp)
{
return bpf_redirect_map(&tx_port, 2, 0);
}
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 3);
+ __type(key, __u32);
+ __type(value, __u64);
+} rxcnt SEC(".maps");
+
+static int xdp_count(struct xdp_md *xdp, __u32 key)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eth = data;
+ __u64 *count;
+
+ if (data + sizeof(*eth) > data_end)
+ return XDP_DROP;
+
+ if (bpf_htons(eth->h_proto) == ETH_P_IP) {
+ /* We only count IPv4 packets */
+ count = bpf_map_lookup_elem(&rxcnt, &key);
+ if (count)
+ *count += 1;
+ }
+
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int xdp_count_0(struct xdp_md *xdp)
+{
+ return xdp_count(xdp, 0);
+}
+
+SEC("xdp")
+int xdp_count_1(struct xdp_md *xdp)
+{
+ return xdp_count(xdp, 1);
+}
+
+SEC("xdp")
+int xdp_count_2(struct xdp_md *xdp)
+{
+ return xdp_count(xdp, 2);
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __be64);
+} rx_mac SEC(".maps");
+
+static int store_mac(struct xdp_md *xdp, __u32 id)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eth = data;
+ __u32 key = id;
+ __be64 mac = 0;
+
+ if (data + sizeof(*eth) > data_end)
+ return XDP_DROP;
+
+ /* Only store IPv4 MAC to avoid being polluted by IPv6 packets */
+ if (eth->h_proto == bpf_htons(ETH_P_IP)) {
+ __builtin_memcpy(&mac, eth->h_source, ETH_ALEN);
+ bpf_map_update_elem(&rx_mac, &key, &mac, 0);
+ bpf_printk("%s - %x", __func__, mac);
+ }
+
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int store_mac_1(struct xdp_md *xdp)
+{
+ return store_mac(xdp, 0);
+}
+
+SEC("xdp")
+int store_mac_2(struct xdp_md *xdp)
+{
+ return store_mac(xdp, 1);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c b/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c
new file mode 100644
index 000000000000..bc2945ed8a80
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+#define KBUILD_MODNAME "foo"
+#include <string.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+/* One map uses devmap, the other uses devmap_hash, for testing both map types */
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __uint(max_entries, 1024);
+} map_all SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(struct bpf_devmap_val));
+ __uint(max_entries, 128);
+} map_egress SEC(".maps");
+
+/* Map to store egress interfaces' MAC addresses */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __be64);
+ __uint(max_entries, 128);
+} mac_map SEC(".maps");
+
+/* Map to store redirect flags for each protocol */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u16);
+ __type(value, __u64);
+ __uint(max_entries, 16);
+} redirect_flags SEC(".maps");
+
+SEC("xdp")
+int xdp_redirect_map_multi_prog(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ int if_index = ctx->ingress_ifindex;
+ struct ethhdr *eth = data;
+ __u64 *flags_from_map;
+ __u16 h_proto;
+ __u64 nh_off;
+ __u64 flags;
+
+ nh_off = sizeof(*eth);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+
+ h_proto = bpf_htons(eth->h_proto);
+
+ flags_from_map = bpf_map_lookup_elem(&redirect_flags, &h_proto);
+
+ /* Default flags for IPv4 : (BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS) */
+ if (h_proto == ETH_P_IP) {
+ flags = flags_from_map ? *flags_from_map : BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS;
+ return bpf_redirect_map(&map_all, 0, flags);
+ }
+ /* Default flags for IPv6 : 0 */
+ if (h_proto == ETH_P_IPV6) {
+ flags = flags_from_map ? *flags_from_map : 0;
+ return bpf_redirect_map(&map_all, if_index, flags);
+ }
+ /* Default flags for other protocols : BPF_F_BROADCAST */
+ else {
+ flags = flags_from_map ? *flags_from_map : BPF_F_BROADCAST;
+ return bpf_redirect_map(&map_all, 0, flags);
+ }
+}
+
+/* The following 2 progs are for 2nd devmap prog testing */
+SEC("xdp")
+int xdp_redirect_map_all_prog(struct xdp_md *ctx)
+{
+ return bpf_redirect_map(&map_egress, 0,
+ BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
+}
+
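+/* Devmap program: rewrite the source MAC with the egress interface's address
+ * stored in mac_map.
+ */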
+SEC("xdp/devmap")
+int xdp_devmap_prog(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ __u32 key = ctx->egress_ifindex;
+ struct ethhdr *eth = data;
+ __u64 nh_off;
+ __be64 *mac;
+
+ nh_off = sizeof(*eth);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+
+ mac = bpf_map_lookup_elem(&mac_map, &key);
+ if (mac)
+ __builtin_memcpy(eth->h_source, mac, ETH_ALEN);
+
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
new file mode 100644
index 000000000000..62b8e29ced9f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
@@ -0,0 +1,865 @@
+// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#define BPF_NO_KFUNC_PROTOTYPES
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <asm/errno.h>
+
+#include "bpf_compiler.h"
+
+#define TC_ACT_OK 0
+#define TC_ACT_SHOT 2
+
+#define NSEC_PER_SEC 1000000000L
+
+#define ETH_ALEN 6
+#define ETH_P_IP 0x0800
+#define ETH_P_IPV6 0x86DD
+
+#define tcp_flag_word(tp) (((union tcp_word_hdr *)(tp))->words[3])
+
+#define IP_MF 0x2000
+#define IP_OFFSET 0x1fff
+
+#define NEXTHDR_TCP 6
+
+#define TCPOPT_NOP 1
+#define TCPOPT_EOL 0
+#define TCPOPT_MSS 2
+#define TCPOPT_WINDOW 3
+#define TCPOPT_SACK_PERM 4
+#define TCPOPT_TIMESTAMP 8
+
+#define TCPOLEN_MSS 4
+#define TCPOLEN_WINDOW 3
+#define TCPOLEN_SACK_PERM 2
+#define TCPOLEN_TIMESTAMP 10
+
+#define TCP_TS_HZ 1000
+#define TS_OPT_WSCALE_MASK 0xf
+#define TS_OPT_SACK (1 << 4)
+#define TS_OPT_ECN (1 << 5)
+#define TSBITS 6
+#define TSMASK (((__u32)1 << TSBITS) - 1)
+#define TCP_MAX_WSCALE 14U
+
+#define IPV4_MAXLEN 60
+#define TCP_MAXLEN 60
+
+#define DEFAULT_MSS4 1460
+#define DEFAULT_MSS6 1440
+#define DEFAULT_WSCALE 7
+#define DEFAULT_TTL 64
+#define MAX_ALLOWED_PORTS 8
+
+#define MAX_PACKET_OFF 0xffff
+
+#define swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
+
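+/* values[0] packs the TCP/IP options: IPv4 MSS in bits 0-15, wscale in bits
+ * 16-19, TTL in bits 24-31 and IPv6 MSS in bits 32-47 (see
+ * values_get_tcpipopts()); values[1] counts generated SYNACKs.
+ */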
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u64);
+ __uint(max_entries, 2);
+} values SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u16);
+ __uint(max_entries, MAX_ALLOWED_PORTS);
+} allowed_ports SEC(".maps");
+
+/* Some symbols defined in net/netfilter/nf_conntrack_bpf.c are unavailable in
+ * vmlinux.h if CONFIG_NF_CONNTRACK=m, so they are redefined locally.
+ */
+
+struct bpf_ct_opts___local {
+ s32 netns_id;
+ s32 error;
+ u8 l4proto;
+ u8 dir;
+ u8 reserved[2];
+} __attribute__((preserve_access_index));
+
+#define BPF_F_CURRENT_NETNS (-1)
+
+extern struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx,
+ struct bpf_sock_tuple *bpf_tuple,
+ __u32 len_tuple,
+ struct bpf_ct_opts___local *opts,
+ __u32 len_opts) __ksym;
+
+extern struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *skb_ctx,
+ struct bpf_sock_tuple *bpf_tuple,
+ u32 len_tuple,
+ struct bpf_ct_opts___local *opts,
+ u32 len_opts) __ksym;
+
+extern void bpf_ct_release(struct nf_conn *ct) __ksym;
+
+static __always_inline void swap_eth_addr(__u8 *a, __u8 *b)
+{
+ __u8 tmp[ETH_ALEN];
+
+ __builtin_memcpy(tmp, a, ETH_ALEN);
+ __builtin_memcpy(a, b, ETH_ALEN);
+ __builtin_memcpy(b, tmp, ETH_ALEN);
+}
+
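+/* Fold a 32-bit partial checksum into 16 bits with end-around carry and
+ * return its one's complement.
+ */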
+static __always_inline __u16 csum_fold(__u32 csum)
+{
+ csum = (csum & 0xffff) + (csum >> 16);
+ csum = (csum & 0xffff) + (csum >> 16);
+ return (__u16)~csum;
+}
+
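+/* Fold the TCP/UDP payload checksum together with the IPv4 pseudo-header
+ * (saddr, daddr, proto, len).
+ */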
+static __always_inline __u16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto,
+ __u32 csum)
+{
+ __u64 s = csum;
+
+ s += (__u32)saddr;
+ s += (__u32)daddr;
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ s += proto + len;
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ s += (proto + len) << 8;
+#else
+#error Unknown endian
+#endif
+ s = (s & 0xffffffff) + (s >> 32);
+ s = (s & 0xffffffff) + (s >> 32);
+
+ return csum_fold((__u32)s);
+}
+
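+/* Same as above for IPv6: add both 128-bit addresses plus length and protocol
+ * to the payload checksum.
+ */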
+static __always_inline __u16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto, __u32 csum)
+{
+ __u64 sum = csum;
+ int i;
+
+ __pragma_loop_unroll
+ for (i = 0; i < 4; i++)
+ sum += (__u32)saddr->in6_u.u6_addr32[i];
+
+ __pragma_loop_unroll
+ for (i = 0; i < 4; i++)
+ sum += (__u32)daddr->in6_u.u6_addr32[i];
+
+ /* Don't combine additions to avoid 32-bit overflow. */
+ sum += bpf_htonl(len);
+ sum += bpf_htonl(proto);
+
+ sum = (sum & 0xffffffff) + (sum >> 32);
+ sum = (sum & 0xffffffff) + (sum >> 32);
+
+ return csum_fold((__u32)sum);
+}
+
+static __always_inline __u64 tcp_clock_ns(void)
+{
+ return bpf_ktime_get_ns();
+}
+
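+/* Convert nanoseconds to TCP timestamp clock ticks (TCP_TS_HZ = 1000, i.e. milliseconds). */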
+static __always_inline __u32 tcp_ns_to_ts(__u64 ns)
+{
+ return ns / (NSEC_PER_SEC / TCP_TS_HZ);
+}
+
+static __always_inline __u32 tcp_clock_ms(void)
+{
+ return tcp_ns_to_ts(tcp_clock_ns());
+}
+
+struct tcpopt_context {
+ void *data;
+ void *data_end;
+ __be32 *tsecr;
+ __u8 wscale;
+ bool option_timestamp;
+ bool option_sack;
+ __u32 off;
+};
+
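+/* Return a pointer to the next sz bytes of TCP options and advance ctx->off,
+ * or NULL when out of bounds.
+ */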
+static __always_inline u8 *next(struct tcpopt_context *ctx, __u32 sz)
+{
+ __u64 off = ctx->off;
+ __u8 *data;
+
+ /* Verifier forbids access to packet when offset exceeds MAX_PACKET_OFF */
+ if (off > MAX_PACKET_OFF - sz)
+ return NULL;
+
+ data = ctx->data + off;
+ barrier_var(data);
+ if (data + sz >= ctx->data_end)
+ return NULL;
+
+ ctx->off += sz;
+ return data;
+}
+
+static int tscookie_tcpopt_parse(struct tcpopt_context *ctx)
+{
+ __u8 *opcode, *opsize, *wscale, *tsecr;
+ __u32 off = ctx->off;
+
+ opcode = next(ctx, 1);
+ if (!opcode)
+ return 1;
+
+ if (*opcode == TCPOPT_EOL)
+ return 1;
+ if (*opcode == TCPOPT_NOP)
+ return 0;
+
+ opsize = next(ctx, 1);
+ if (!opsize || *opsize < 2)
+ return 1;
+
+ switch (*opcode) {
+ case TCPOPT_WINDOW:
+ wscale = next(ctx, 1);
+ if (!wscale)
+ return 1;
+ if (*opsize == TCPOLEN_WINDOW)
+ ctx->wscale = *wscale < TCP_MAX_WSCALE ? *wscale : TCP_MAX_WSCALE;
+ break;
+ case TCPOPT_TIMESTAMP:
+ tsecr = next(ctx, 4);
+ if (!tsecr)
+ return 1;
+ if (*opsize == TCPOLEN_TIMESTAMP) {
+ ctx->option_timestamp = true;
+ /* Client's tsval becomes our tsecr. */
+ *ctx->tsecr = get_unaligned((__be32 *)tsecr);
+ }
+ break;
+ case TCPOPT_SACK_PERM:
+ if (*opsize == TCPOLEN_SACK_PERM)
+ ctx->option_sack = true;
+ break;
+ }
+
+ ctx->off = off + *opsize;
+
+ return 0;
+}
+
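+/* Parse up to 7 options per bpf_loop() iteration; 6 iterations cover the
+ * whole 40-byte TCP option space.
+ */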
+static int tscookie_tcpopt_parse_batch(__u32 index, void *context)
+{
+ int i;
+
+ for (i = 0; i < 7; i++)
+ if (tscookie_tcpopt_parse(context))
+ return 1;
+ return 0;
+}
+
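+/* Build the timestamp cookie placed in tsval: the upper bits carry the masked
+ * millisecond clock, the low TSBITS bits encode the client's window scale and
+ * whether SACK and ECN were requested.
+ */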
+static __always_inline bool tscookie_init(struct tcphdr *tcp_header,
+ __u16 tcp_len, __be32 *tsval,
+ __be32 *tsecr, void *data, void *data_end)
+{
+ struct tcpopt_context loop_ctx = {
+ .data = data,
+ .data_end = data_end,
+ .tsecr = tsecr,
+ .wscale = TS_OPT_WSCALE_MASK,
+ .option_timestamp = false,
+ .option_sack = false,
+ /* Note: the verifier currently tracks .off as an unbound scalar.
+ * Should the verifier at some point get smarter and compute a
+ * bounded value for this var, beware that it might hinder
+ * bpf_loop() convergence validation.
+ */
+ .off = (__u8 *)(tcp_header + 1) - (__u8 *)data,
+ };
+ u32 cookie;
+
+ bpf_loop(6, tscookie_tcpopt_parse_batch, &loop_ctx, 0);
+
+ if (!loop_ctx.option_timestamp)
+ return false;
+
+ cookie = tcp_clock_ms() & ~TSMASK;
+ cookie |= loop_ctx.wscale & TS_OPT_WSCALE_MASK;
+ if (loop_ctx.option_sack)
+ cookie |= TS_OPT_SACK;
+ if (tcp_header->ece && tcp_header->cwr)
+ cookie |= TS_OPT_ECN;
+ *tsval = bpf_htonl(cookie);
+
+ return true;
+}
+
+static __always_inline void values_get_tcpipopts(__u16 *mss, __u8 *wscale,
+ __u8 *ttl, bool ipv6)
+{
+ __u32 key = 0;
+ __u64 *value;
+
+ value = bpf_map_lookup_elem(&values, &key);
+ if (value && *value != 0) {
+ if (ipv6)
+ *mss = (*value >> 32) & 0xffff;
+ else
+ *mss = *value & 0xffff;
+ *wscale = (*value >> 16) & 0xf;
+ *ttl = (*value >> 24) & 0xff;
+ return;
+ }
+
+ *mss = ipv6 ? DEFAULT_MSS6 : DEFAULT_MSS4;
+ *wscale = DEFAULT_WSCALE;
+ *ttl = DEFAULT_TTL;
+}
+
+static __always_inline void values_inc_synacks(void)
+{
+ __u32 key = 1;
+ __u64 *value;
+
+ value = bpf_map_lookup_elem(&values, &key);
+ if (value)
+ __sync_fetch_and_add(value, 1);
+}
+
+static __always_inline bool check_port_allowed(__u16 port)
+{
+ __u32 i;
+
+ for (i = 0; i < MAX_ALLOWED_PORTS; i++) {
+ __u32 key = i;
+ __u16 *value;
+
+ value = bpf_map_lookup_elem(&allowed_ports, &key);
+
+ if (!value)
+ break;
+ /* 0 is a terminator value. Check it first to avoid matching on
+ * a forbidden port == 0 and returning true.
+ */
+ if (*value == 0)
+ break;
+
+ if (*value == port)
+ return true;
+ }
+
+ return false;
+}
+
+struct header_pointers {
+ struct ethhdr *eth;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ struct tcphdr *tcp;
+ __u16 tcp_len;
+};
+
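+/* Parse Ethernet/IP/TCP headers. Returns XDP_TX for a well-formed TCP packet
+ * that needs further processing, XDP_PASS for non-TCP traffic and XDP_DROP
+ * for malformed packets.
+ */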
+static __always_inline int tcp_dissect(void *data, void *data_end,
+ struct header_pointers *hdr)
+{
+ hdr->eth = data;
+ if (hdr->eth + 1 > data_end)
+ return XDP_DROP;
+
+ switch (bpf_ntohs(hdr->eth->h_proto)) {
+ case ETH_P_IP:
+ hdr->ipv6 = NULL;
+
+ hdr->ipv4 = (void *)hdr->eth + sizeof(*hdr->eth);
+ if (hdr->ipv4 + 1 > data_end)
+ return XDP_DROP;
+ if (hdr->ipv4->ihl * 4 < sizeof(*hdr->ipv4))
+ return XDP_DROP;
+ if (hdr->ipv4->version != 4)
+ return XDP_DROP;
+
+ if (hdr->ipv4->protocol != IPPROTO_TCP)
+ return XDP_PASS;
+
+ hdr->tcp = (void *)hdr->ipv4 + hdr->ipv4->ihl * 4;
+ break;
+ case ETH_P_IPV6:
+ hdr->ipv4 = NULL;
+
+ hdr->ipv6 = (void *)hdr->eth + sizeof(*hdr->eth);
+ if (hdr->ipv6 + 1 > data_end)
+ return XDP_DROP;
+ if (hdr->ipv6->version != 6)
+ return XDP_DROP;
+
+ /* XXX: Extension headers are not supported and could circumvent
+ * XDP SYN flood protection.
+ */
+ if (hdr->ipv6->nexthdr != NEXTHDR_TCP)
+ return XDP_PASS;
+
+ hdr->tcp = (void *)hdr->ipv6 + sizeof(*hdr->ipv6);
+ break;
+ default:
+ /* XXX: VLANs will circumvent XDP SYN flood protection. */
+ return XDP_PASS;
+ }
+
+ if (hdr->tcp + 1 > data_end)
+ return XDP_DROP;
+ hdr->tcp_len = hdr->tcp->doff * 4;
+ if (hdr->tcp_len < sizeof(*hdr->tcp))
+ return XDP_DROP;
+
+ return XDP_TX;
+}
+
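+/* Look up the packet's tuple in conntrack. Confirmed connections bypass the
+ * syncookie logic (XDP_PASS); unknown connections proceed to SYN/ACK handling
+ * (XDP_TX).
+ */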
+static __always_inline int tcp_lookup(void *ctx, struct header_pointers *hdr, bool xdp)
+{
+ struct bpf_ct_opts___local ct_lookup_opts = {
+ .netns_id = BPF_F_CURRENT_NETNS,
+ .l4proto = IPPROTO_TCP,
+ };
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+ __u32 tup_size;
+
+ if (hdr->ipv4) {
+ /* TCP doesn't normally use fragments, and XDP can't reassemble
+ * them.
+ */
+ if ((hdr->ipv4->frag_off & bpf_htons(IP_MF | IP_OFFSET)) != 0)
+ return XDP_DROP;
+
+ tup.ipv4.saddr = hdr->ipv4->saddr;
+ tup.ipv4.daddr = hdr->ipv4->daddr;
+ tup.ipv4.sport = hdr->tcp->source;
+ tup.ipv4.dport = hdr->tcp->dest;
+ tup_size = sizeof(tup.ipv4);
+ } else if (hdr->ipv6) {
+ __builtin_memcpy(tup.ipv6.saddr, &hdr->ipv6->saddr, sizeof(tup.ipv6.saddr));
+ __builtin_memcpy(tup.ipv6.daddr, &hdr->ipv6->daddr, sizeof(tup.ipv6.daddr));
+ tup.ipv6.sport = hdr->tcp->source;
+ tup.ipv6.dport = hdr->tcp->dest;
+ tup_size = sizeof(tup.ipv6);
+ } else {
+ /* The verifier can't track that either ipv4 or ipv6 is not
+ * NULL.
+ */
+ return XDP_ABORTED;
+ }
+ if (xdp)
+ ct = bpf_xdp_ct_lookup(ctx, &tup, tup_size, &ct_lookup_opts, sizeof(ct_lookup_opts));
+ else
+ ct = bpf_skb_ct_lookup(ctx, &tup, tup_size, &ct_lookup_opts, sizeof(ct_lookup_opts));
+ if (ct) {
+ unsigned long status = ct->status;
+
+ bpf_ct_release(ct);
+ if (status & IPS_CONFIRMED)
+ return XDP_PASS;
+ } else if (ct_lookup_opts.error != -ENOENT) {
+ return XDP_ABORTED;
+ }
+
+ /* error == -ENOENT || !(status & IPS_CONFIRMED) */
+ return XDP_TX;
+}
+
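+/* Write the SYNACK's TCP options: MSS always and, when a timestamp cookie is
+ * available, SACK-permitted (if the client offered it), the timestamp pair
+ * and a window scale option (if the client sent one). Returns the number of
+ * 32-bit words written.
+ */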
+static __always_inline __u8 tcp_mkoptions(__be32 *buf, __be32 *tsopt, __u16 mss,
+ __u8 wscale)
+{
+ __be32 *start = buf;
+
+ *buf++ = bpf_htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
+
+ if (!tsopt)
+ return buf - start;
+
+ if (tsopt[0] & bpf_htonl(1 << 4))
+ *buf++ = bpf_htonl((TCPOPT_SACK_PERM << 24) |
+ (TCPOLEN_SACK_PERM << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP);
+ else
+ *buf++ = bpf_htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP);
+ *buf++ = tsopt[0];
+ *buf++ = tsopt[1];
+
+ if ((tsopt[0] & bpf_htonl(0xf)) != bpf_htonl(0xf))
+ *buf++ = bpf_htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_WINDOW << 16) |
+ (TCPOLEN_WINDOW << 8) |
+ wscale);
+
+ return buf - start;
+}
+
+static __always_inline void tcp_gen_synack(struct tcphdr *tcp_header,
+ __u32 cookie, __be32 *tsopt,
+ __u16 mss, __u8 wscale)
+{
+ void *tcp_options;
+
+ tcp_flag_word(tcp_header) = TCP_FLAG_SYN | TCP_FLAG_ACK;
+ if (tsopt && (tsopt[0] & bpf_htonl(1 << 5)))
+ tcp_flag_word(tcp_header) |= TCP_FLAG_ECE;
+ tcp_header->doff = 5; /* doff is part of tcp_flag_word. */
+ swap(tcp_header->source, tcp_header->dest);
+ tcp_header->ack_seq = bpf_htonl(bpf_ntohl(tcp_header->seq) + 1);
+ tcp_header->seq = bpf_htonl(cookie);
+ tcp_header->window = 0;
+ tcp_header->urg_ptr = 0;
+ tcp_header->check = 0; /* Calculate checksum later. */
+
+ tcp_options = (void *)(tcp_header + 1);
+ tcp_header->doff += tcp_mkoptions(tcp_options, tsopt, mss, wscale);
+}
+
+static __always_inline void tcpv4_gen_synack(struct header_pointers *hdr,
+ __u32 cookie, __be32 *tsopt)
+{
+ __u8 wscale;
+ __u16 mss;
+ __u8 ttl;
+
+ values_get_tcpipopts(&mss, &wscale, &ttl, false);
+
+ swap_eth_addr(hdr->eth->h_source, hdr->eth->h_dest);
+
+ swap(hdr->ipv4->saddr, hdr->ipv4->daddr);
+ hdr->ipv4->check = 0; /* Calculate checksum later. */
+ hdr->ipv4->tos = 0;
+ hdr->ipv4->id = 0;
+ hdr->ipv4->ttl = ttl;
+
+ tcp_gen_synack(hdr->tcp, cookie, tsopt, mss, wscale);
+
+ hdr->tcp_len = hdr->tcp->doff * 4;
+ hdr->ipv4->tot_len = bpf_htons(sizeof(*hdr->ipv4) + hdr->tcp_len);
+}
+
+static __always_inline void tcpv6_gen_synack(struct header_pointers *hdr,
+ __u32 cookie, __be32 *tsopt)
+{
+ __u8 wscale;
+ __u16 mss;
+ __u8 ttl;
+
+ values_get_tcpipopts(&mss, &wscale, &ttl, true);
+
+ swap_eth_addr(hdr->eth->h_source, hdr->eth->h_dest);
+
+ swap(hdr->ipv6->saddr, hdr->ipv6->daddr);
+ *(__be32 *)hdr->ipv6 = bpf_htonl(0x60000000);
+ hdr->ipv6->hop_limit = ttl;
+
+ tcp_gen_synack(hdr->tcp, cookie, tsopt, mss, wscale);
+
+ hdr->tcp_len = hdr->tcp->doff * 4;
+ hdr->ipv6->payload_len = bpf_htons(hdr->tcp_len);
+}
+
+static __always_inline int syncookie_handle_syn(struct header_pointers *hdr,
+ void *ctx,
+ void *data, void *data_end,
+ bool xdp)
+{
+ __u32 old_pkt_size, new_pkt_size;
+ /* Unlike clang 10, clang 11 and 12 generate code that doesn't pass the
+ * BPF verifier if tsopt is not volatile. Volatile forces it to store
+ * the pointer value and use it directly, otherwise tcp_mkoptions is
+ * (mis)compiled like this:
+ * if (!tsopt)
+ * return buf - start;
+ * reg = stored_return_value_of_tscookie_init;
+ * if (reg)
+ * tsopt = tsopt_buf;
+ * else
+ * tsopt = NULL;
+ * ...
+ * *buf++ = tsopt[1];
+ * It creates a dead branch where tsopt is assigned NULL, but the
+ * verifier can't prove it's dead and blocks the program.
+ */
+ __be32 * volatile tsopt = NULL;
+ __be32 tsopt_buf[2] = {};
+ __u16 ip_len;
+ __u32 cookie;
+ __s64 value;
+
+ /* Checksum is not yet verified, but both checksum failure and TCP
+ * header checks return XDP_DROP, so the order doesn't matter.
+ */
+ if (hdr->tcp->fin || hdr->tcp->rst)
+ return XDP_DROP;
+
+ /* Issue SYN cookies on allowed ports, drop SYN packets on blocked
+ * ports.
+ */
+ if (!check_port_allowed(bpf_ntohs(hdr->tcp->dest)))
+ return XDP_DROP;
+
+ if (hdr->ipv4) {
+ /* Check the IPv4 and TCP checksums before creating a SYNACK. */
+ value = bpf_csum_diff(0, 0, (void *)hdr->ipv4, hdr->ipv4->ihl * 4, 0);
+ if (value < 0)
+ return XDP_ABORTED;
+ if (csum_fold(value) != 0)
+ return XDP_DROP; /* Bad IPv4 checksum. */
+
+ value = bpf_csum_diff(0, 0, (void *)hdr->tcp, hdr->tcp_len, 0);
+ if (value < 0)
+ return XDP_ABORTED;
+ if (csum_tcpudp_magic(hdr->ipv4->saddr, hdr->ipv4->daddr,
+ hdr->tcp_len, IPPROTO_TCP, value) != 0)
+ return XDP_DROP; /* Bad TCP checksum. */
+
+ ip_len = sizeof(*hdr->ipv4);
+
+ value = bpf_tcp_raw_gen_syncookie_ipv4(hdr->ipv4, hdr->tcp,
+ hdr->tcp_len);
+ } else if (hdr->ipv6) {
+ /* Check the TCP checksum before creating a SYNACK. */
+ value = bpf_csum_diff(0, 0, (void *)hdr->tcp, hdr->tcp_len, 0);
+ if (value < 0)
+ return XDP_ABORTED;
+ if (csum_ipv6_magic(&hdr->ipv6->saddr, &hdr->ipv6->daddr,
+ hdr->tcp_len, IPPROTO_TCP, value) != 0)
+ return XDP_DROP; /* Bad TCP checksum. */
+
+ ip_len = sizeof(*hdr->ipv6);
+
+ value = bpf_tcp_raw_gen_syncookie_ipv6(hdr->ipv6, hdr->tcp,
+ hdr->tcp_len);
+ } else {
+ return XDP_ABORTED;
+ }
+
+ if (value < 0)
+ return XDP_ABORTED;
+ cookie = (__u32)value;
+
+ if (tscookie_init((void *)hdr->tcp, hdr->tcp_len,
+ &tsopt_buf[0], &tsopt_buf[1], data, data_end))
+ tsopt = tsopt_buf;
+
+ /* Check that there is enough space for a SYNACK. It also covers
+ * the check that the destination of the __builtin_memmove below
+ * doesn't overflow.
+ */
+ if (data + sizeof(*hdr->eth) + ip_len + TCP_MAXLEN > data_end)
+ return XDP_ABORTED;
+
+ if (hdr->ipv4) {
+ if (hdr->ipv4->ihl * 4 > sizeof(*hdr->ipv4)) {
+ struct tcphdr *new_tcp_header;
+
+ new_tcp_header = data + sizeof(*hdr->eth) + sizeof(*hdr->ipv4);
+ __builtin_memmove(new_tcp_header, hdr->tcp, sizeof(*hdr->tcp));
+ hdr->tcp = new_tcp_header;
+
+ hdr->ipv4->ihl = sizeof(*hdr->ipv4) / 4;
+ }
+
+ tcpv4_gen_synack(hdr, cookie, tsopt);
+ } else if (hdr->ipv6) {
+ tcpv6_gen_synack(hdr, cookie, tsopt);
+ } else {
+ return XDP_ABORTED;
+ }
+
+ /* Recalculate checksums. */
+ hdr->tcp->check = 0;
+ value = bpf_csum_diff(0, 0, (void *)hdr->tcp, hdr->tcp_len, 0);
+ if (value < 0)
+ return XDP_ABORTED;
+ if (hdr->ipv4) {
+ hdr->tcp->check = csum_tcpudp_magic(hdr->ipv4->saddr,
+ hdr->ipv4->daddr,
+ hdr->tcp_len,
+ IPPROTO_TCP,
+ value);
+
+ hdr->ipv4->check = 0;
+ value = bpf_csum_diff(0, 0, (void *)hdr->ipv4, sizeof(*hdr->ipv4), 0);
+ if (value < 0)
+ return XDP_ABORTED;
+ hdr->ipv4->check = csum_fold(value);
+ } else if (hdr->ipv6) {
+ hdr->tcp->check = csum_ipv6_magic(&hdr->ipv6->saddr,
+ &hdr->ipv6->daddr,
+ hdr->tcp_len,
+ IPPROTO_TCP,
+ value);
+ } else {
+ return XDP_ABORTED;
+ }
+
+ /* Set the new packet size. */
+ old_pkt_size = data_end - data;
+ new_pkt_size = sizeof(*hdr->eth) + ip_len + hdr->tcp->doff * 4;
+ if (xdp) {
+ if (bpf_xdp_adjust_tail(ctx, new_pkt_size - old_pkt_size))
+ return XDP_ABORTED;
+ } else {
+ if (bpf_skb_change_tail(ctx, new_pkt_size, 0))
+ return XDP_ABORTED;
+ }
+
+ values_inc_synacks();
+
+ return XDP_TX;
+}
+
+static __always_inline int syncookie_handle_ack(struct header_pointers *hdr)
+{
+ int err;
+
+ if (hdr->tcp->rst)
+ return XDP_DROP;
+
+ if (hdr->ipv4)
+ err = bpf_tcp_raw_check_syncookie_ipv4(hdr->ipv4, hdr->tcp);
+ else if (hdr->ipv6)
+ err = bpf_tcp_raw_check_syncookie_ipv6(hdr->ipv6, hdr->tcp);
+ else
+ return XDP_ABORTED;
+ if (err)
+ return XDP_DROP;
+
+ return XDP_PASS;
+}
+
+static __always_inline int syncookie_part1(void *ctx, void *data, void *data_end,
+ struct header_pointers *hdr, bool xdp)
+{
+ int ret;
+
+ ret = tcp_dissect(data, data_end, hdr);
+ if (ret != XDP_TX)
+ return ret;
+
+ ret = tcp_lookup(ctx, hdr, xdp);
+ if (ret != XDP_TX)
+ return ret;
+
+ /* Packet is TCP and doesn't belong to an established connection. */
+
+ if ((hdr->tcp->syn ^ hdr->tcp->ack) != 1)
+ return XDP_DROP;
+
+ /* Grow the TCP header to TCP_MAXLEN to be able to pass any hdr->tcp_len
+ * to bpf_tcp_raw_gen_syncookie_ipv{4,6} and pass the verifier.
+ */
+ if (xdp) {
+ if (bpf_xdp_adjust_tail(ctx, TCP_MAXLEN - hdr->tcp_len))
+ return XDP_ABORTED;
+ } else {
+ /* Without volatile the verifier throws this error:
+ * R9 32-bit pointer arithmetic prohibited
+ */
+ volatile u64 old_len = data_end - data;
+
+ if (bpf_skb_change_tail(ctx, old_len + TCP_MAXLEN - hdr->tcp_len, 0))
+ return XDP_ABORTED;
+ }
+
+ return XDP_TX;
+}
+
+static __always_inline int syncookie_part2(void *ctx, void *data, void *data_end,
+ struct header_pointers *hdr, bool xdp)
+{
+ if (hdr->ipv4) {
+ hdr->eth = data;
+ hdr->ipv4 = (void *)hdr->eth + sizeof(*hdr->eth);
+ /* IPV4_MAXLEN is needed when calculating checksum.
+ * At least sizeof(struct iphdr) is needed here to access ihl.
+ */
+ if ((void *)hdr->ipv4 + IPV4_MAXLEN > data_end)
+ return XDP_ABORTED;
+ hdr->tcp = (void *)hdr->ipv4 + hdr->ipv4->ihl * 4;
+ } else if (hdr->ipv6) {
+ hdr->eth = data;
+ hdr->ipv6 = (void *)hdr->eth + sizeof(*hdr->eth);
+ hdr->tcp = (void *)hdr->ipv6 + sizeof(*hdr->ipv6);
+ } else {
+ return XDP_ABORTED;
+ }
+
+ if ((void *)hdr->tcp + TCP_MAXLEN > data_end)
+ return XDP_ABORTED;
+
+ /* We run out of registers, tcp_len gets spilled to the stack, and the
+ * verifier forgets its min and max values checked above in tcp_dissect.
+ */
+ hdr->tcp_len = hdr->tcp->doff * 4;
+ if (hdr->tcp_len < sizeof(*hdr->tcp))
+ return XDP_ABORTED;
+
+ return hdr->tcp->syn ? syncookie_handle_syn(hdr, ctx, data, data_end, xdp) :
+ syncookie_handle_ack(hdr);
+}
+
+SEC("xdp")
+int syncookie_xdp(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct header_pointers hdr;
+ int ret;
+
+ ret = syncookie_part1(ctx, data, data_end, &hdr, true);
+ if (ret != XDP_TX)
+ return ret;
+
+ data_end = (void *)(long)ctx->data_end;
+ data = (void *)(long)ctx->data;
+
+ return syncookie_part2(ctx, data, data_end, &hdr, true);
+}
+
+SEC("tc")
+int syncookie_tc(struct __sk_buff *skb)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+ struct header_pointers hdr;
+ int ret;
+
+ ret = syncookie_part1(skb, data, data_end, &hdr, false);
+ if (ret != XDP_TX)
+ return ret == XDP_PASS ? TC_ACT_OK : TC_ACT_SHOT;
+
+ data_end = (void *)(long)skb->data_end;
+ data = (void *)(long)skb->data;
+
+ ret = syncookie_part2(skb, data, data_end, &hdr, false);
+ switch (ret) {
+ case XDP_PASS:
+ return TC_ACT_OK;
+ case XDP_TX:
+ return bpf_redirect(skb->ifindex, 0);
+ default:
+ return TC_ACT_SHOT;
+ }
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xdp_tx.c b/tools/testing/selftests/bpf/progs/xdp_tx.c
index 94e6c2b281cb..5f725c720e00 100644
--- a/tools/testing/selftests/bpf/progs/xdp_tx.c
+++ b/tools/testing/selftests/bpf/progs/xdp_tx.c
@@ -3,7 +3,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-SEC("tx")
+SEC("xdp")
int xdp_tx(struct xdp_md *xdp)
{
return XDP_TX;
diff --git a/tools/testing/selftests/bpf/progs/xdping_kern.c b/tools/testing/selftests/bpf/progs/xdping_kern.c
index 6b9ca40bd1f4..44e2b0ef23ae 100644
--- a/tools/testing/selftests/bpf/progs/xdping_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdping_kern.c
@@ -15,6 +15,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
#include "xdping.h"
struct {
@@ -86,10 +87,9 @@ static __always_inline int icmp_check(struct xdp_md *ctx, int type)
return XDP_TX;
}
-SEC("xdpclient")
+SEC("xdp")
int xdping_client(struct xdp_md *ctx)
{
- void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct pinginfo *pinginfo = NULL;
struct ethhdr *eth = data;
@@ -117,7 +117,7 @@ int xdping_client(struct xdp_md *ctx)
return XDP_PASS;
if (pinginfo->start) {
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0; i < XDPING_MAX_COUNT; i++) {
if (pinginfo->times[i] == 0)
break;
@@ -150,10 +150,9 @@ int xdping_client(struct xdp_md *ctx)
return XDP_TX;
}
-SEC("xdpserver")
+SEC("xdp")
int xdping_server(struct xdp_md *ctx)
{
- void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct ethhdr *eth = data;
struct icmphdr *icmph;
diff --git a/tools/testing/selftests/bpf/progs/xdpwall.c b/tools/testing/selftests/bpf/progs/xdpwall.c
new file mode 100644
index 000000000000..c2dd0c28237a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdpwall.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <stdbool.h>
+#include <stdint.h>
+#include <linux/stddef.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/bpf.h>
+#include <linux/types.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+
+enum pkt_parse_err {
+ NO_ERR,
+ BAD_IP6_HDR,
+ BAD_IP4GUE_HDR,
+ BAD_IP6GUE_HDR,
+};
+
+enum pkt_flag {
+ TUNNEL = 0x1,
+ TCP_SYN = 0x2,
+ QUIC_INITIAL_FLAG = 0x4,
+ TCP_ACK = 0x8,
+ TCP_RST = 0x10
+};
+
+struct v4_lpm_key {
+ __u32 prefixlen;
+ __u32 src;
+};
+
+struct v4_lpm_val {
+ struct v4_lpm_key key;
+ __u8 val;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 16);
+ __type(key, struct in6_addr);
+ __type(value, bool);
+} v6_addr_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 16);
+ __type(key, __u32);
+ __type(value, bool);
+} v4_addr_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LPM_TRIE);
+ __uint(max_entries, 16);
+ __uint(key_size, sizeof(struct v4_lpm_key));
+ __uint(value_size, sizeof(struct v4_lpm_val));
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} v4_lpm_val_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 16);
+ __type(key, int);
+ __type(value, __u8);
+} tcp_port_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 16);
+ __type(key, int);
+ __type(value, __u16);
+} udp_port_map SEC(".maps");
+
+enum ip_type { V4 = 1, V6 = 2 };
+
+struct fw_match_info {
+ __u8 v4_src_ip_match;
+ __u8 v6_src_ip_match;
+ __u8 v4_src_prefix_match;
+ __u8 v4_dst_prefix_match;
+ __u8 tcp_dp_match;
+ __u16 udp_sp_match;
+ __u16 udp_dp_match;
+ bool is_tcp;
+ bool is_tcp_syn;
+};
+
+struct pkt_info {
+ enum ip_type type;
+ union {
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } ip;
+ int sport;
+ int dport;
+ __u16 trans_hdr_offset;
+ __u8 proto;
+ __u8 flags;
+};
+
+static __always_inline struct ethhdr *parse_ethhdr(void *data, void *data_end)
+{
+ struct ethhdr *eth = data;
+
+ if (eth + 1 > data_end)
+ return NULL;
+
+ return eth;
+}
+
+static __always_inline __u8 filter_ipv6_addr(const struct in6_addr *ipv6addr)
+{
+ __u8 *leaf;
+
+ leaf = bpf_map_lookup_elem(&v6_addr_map, ipv6addr);
+
+ return leaf ? *leaf : 0;
+}
+
+static __always_inline __u8 filter_ipv4_addr(const __u32 ipaddr)
+{
+ __u8 *leaf;
+
+ leaf = bpf_map_lookup_elem(&v4_addr_map, &ipaddr);
+
+ return leaf ? *leaf : 0;
+}
+
+static __always_inline __u8 filter_ipv4_lpm(const __u32 ipaddr)
+{
+ struct v4_lpm_key v4_key = {};
+ struct v4_lpm_val *lpm_val;
+
+ v4_key.src = ipaddr;
+ v4_key.prefixlen = 32;
+
+ lpm_val = bpf_map_lookup_elem(&v4_lpm_val_map, &v4_key);
+
+ return lpm_val ? lpm_val->val : 0;
+}
+
+static __always_inline void
+filter_src_dst_ip(struct pkt_info* info, struct fw_match_info* match_info)
+{
+ if (info->type == V6) {
+ match_info->v6_src_ip_match =
+ filter_ipv6_addr(&info->ip.ipv6->saddr);
+ } else if (info->type == V4) {
+ match_info->v4_src_ip_match =
+ filter_ipv4_addr(info->ip.ipv4->saddr);
+ match_info->v4_src_prefix_match =
+ filter_ipv4_lpm(info->ip.ipv4->saddr);
+ match_info->v4_dst_prefix_match =
+ filter_ipv4_lpm(info->ip.ipv4->daddr);
+ }
+}
+
+static __always_inline void *
+get_transport_hdr(__u16 offset, void *data, void *data_end)
+{
+ if (offset > 255 || data + offset > data_end)
+ return NULL;
+
+ return data + offset;
+}
+
+static __always_inline bool tcphdr_only_contains_flag(struct tcphdr *tcp,
+ __u32 FLAG)
+{
+ return (tcp_flag_word(tcp) &
+ (TCP_FLAG_ACK | TCP_FLAG_RST | TCP_FLAG_SYN | TCP_FLAG_FIN)) == FLAG;
+}
+
+static __always_inline void set_tcp_flags(struct pkt_info *info,
+ struct tcphdr *tcp) {
+ if (tcphdr_only_contains_flag(tcp, TCP_FLAG_SYN))
+ info->flags |= TCP_SYN;
+ else if (tcphdr_only_contains_flag(tcp, TCP_FLAG_ACK))
+ info->flags |= TCP_ACK;
+ else if (tcphdr_only_contains_flag(tcp, TCP_FLAG_RST))
+ info->flags |= TCP_RST;
+}
+
+static __always_inline bool
+parse_tcp(struct pkt_info *info, void *transport_hdr, void *data_end)
+{
+ struct tcphdr *tcp = transport_hdr;
+
+ if (tcp + 1 > data_end)
+ return false;
+
+ info->sport = bpf_ntohs(tcp->source);
+ info->dport = bpf_ntohs(tcp->dest);
+ set_tcp_flags(info, tcp);
+
+ return true;
+}
+
+static __always_inline bool
+parse_udp(struct pkt_info *info, void *transport_hdr, void *data_end)
+{
+ struct udphdr *udp = transport_hdr;
+
+ if (udp + 1 > data_end)
+ return false;
+
+ info->sport = bpf_ntohs(udp->source);
+ info->dport = bpf_ntohs(udp->dest);
+
+ return true;
+}
+
+static __always_inline __u8 filter_tcp_port(int port)
+{
+ __u8 *leaf = bpf_map_lookup_elem(&tcp_port_map, &port);
+
+ return leaf ? *leaf : 0;
+}
+
+static __always_inline __u16 filter_udp_port(int port)
+{
+ __u16 *leaf = bpf_map_lookup_elem(&udp_port_map, &port);
+
+ return leaf ? *leaf : 0;
+}
+
+static __always_inline bool
+filter_transport_hdr(void *transport_hdr, void *data_end,
+ struct pkt_info *info, struct fw_match_info *match_info)
+{
+ if (info->proto == IPPROTO_TCP) {
+ if (!parse_tcp(info, transport_hdr, data_end))
+ return false;
+
+ match_info->is_tcp = true;
+ match_info->is_tcp_syn = (info->flags & TCP_SYN) > 0;
+
+ match_info->tcp_dp_match = filter_tcp_port(info->dport);
+ } else if (info->proto == IPPROTO_UDP) {
+ if (!parse_udp(info, transport_hdr, data_end))
+ return false;
+
+ match_info->udp_dp_match = filter_udp_port(info->dport);
+ match_info->udp_sp_match = filter_udp_port(info->sport);
+ }
+
+ return true;
+}
+
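+/* Parse GUE encapsulation on UDP port 6666: the IP version nibble of the
+ * first encapsulated byte selects an inner IPv6 or IPv4 header.
+ */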
+static __always_inline __u8
+parse_gue_v6(struct pkt_info *info, struct ipv6hdr *ip6h, void *data_end)
+{
+ struct udphdr *udp = (struct udphdr *)(ip6h + 1);
+ void *encap_data = udp + 1;
+
+ if (udp + 1 > data_end)
+ return BAD_IP6_HDR;
+
+ if (udp->dest != bpf_htons(6666))
+ return NO_ERR;
+
+ info->flags |= TUNNEL;
+
+ if (encap_data + 1 > data_end)
+ return BAD_IP6GUE_HDR;
+
+ if (*(__u8 *)encap_data & 0x30) {
+ struct ipv6hdr *inner_ip6h = encap_data;
+
+ if (inner_ip6h + 1 > data_end)
+ return BAD_IP6GUE_HDR;
+
+ info->type = V6;
+ info->proto = inner_ip6h->nexthdr;
+ info->ip.ipv6 = inner_ip6h;
+ info->trans_hdr_offset += sizeof(struct ipv6hdr) + sizeof(struct udphdr);
+ } else {
+ struct iphdr *inner_ip4h = encap_data;
+
+ if (inner_ip4h + 1 > data_end)
+ return BAD_IP6GUE_HDR;
+
+ info->type = V4;
+ info->proto = inner_ip4h->protocol;
+ info->ip.ipv4 = inner_ip4h;
+ info->trans_hdr_offset += sizeof(struct iphdr) + sizeof(struct udphdr);
+ }
+
+ return NO_ERR;
+}
+
+static __always_inline __u8 parse_ipv6_gue(struct pkt_info *info,
+ void *data, void *data_end)
+{
+ struct ipv6hdr *ip6h = data + sizeof(struct ethhdr);
+
+ if (ip6h + 1 > data_end)
+ return BAD_IP6_HDR;
+
+ info->proto = ip6h->nexthdr;
+ info->ip.ipv6 = ip6h;
+ info->type = V6;
+ info->trans_hdr_offset = sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
+
+ if (info->proto == IPPROTO_UDP)
+ return parse_gue_v6(info, ip6h, data_end);
+
+ return NO_ERR;
+}
+
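+/* Firewall entry point: accept only IPv6 (optionally GUE-encapsulated) frames,
+ * pass ICMPv6 and established (non-SYN) TCP, drop everything else. The address
+ * and port lookups populate match_info but do not change the verdict.
+ */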
+SEC("xdp")
+int edgewall(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)(ctx->data_end);
+ void *data = (void *)(long)(ctx->data);
+ struct fw_match_info match_info = {};
+ struct pkt_info info = {};
+ void *transport_hdr;
+ struct ethhdr *eth;
+ bool filter_res;
+ __u32 proto;
+
+ eth = parse_ethhdr(data, data_end);
+ if (!eth)
+ return XDP_DROP;
+
+ proto = eth->h_proto;
+ if (proto != bpf_htons(ETH_P_IPV6))
+ return XDP_DROP;
+
+ if (parse_ipv6_gue(&info, data, data_end))
+ return XDP_DROP;
+
+ if (info.proto == IPPROTO_ICMPV6)
+ return XDP_PASS;
+
+ if (info.proto != IPPROTO_TCP && info.proto != IPPROTO_UDP)
+ return XDP_DROP;
+
+ filter_src_dst_ip(&info, &match_info);
+
+ transport_hdr = get_transport_hdr(info.trans_hdr_offset, data,
+ data_end);
+ if (!transport_hdr)
+ return XDP_DROP;
+
+ filter_res = filter_transport_hdr(transport_hdr, data_end,
+ &info, &match_info);
+ if (!filter_res)
+ return XDP_DROP;
+
+ if (match_info.is_tcp && !match_info.is_tcp_syn)
+ return XDP_PASS;
+
+ return XDP_DROP;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xfrm_info.c b/tools/testing/selftests/bpf/progs/xfrm_info.c
new file mode 100644
index 000000000000..a1d9f106c3f0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xfrm_info.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+
+struct bpf_xfrm_info___local {
+ u32 if_id;
+ int link;
+} __attribute__((preserve_access_index));
+
+__u32 req_if_id;
+__u32 resp_if_id;
+
+int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx,
+ const struct bpf_xfrm_info___local *from) __ksym;
+int bpf_skb_get_xfrm_info(struct __sk_buff *skb_ctx,
+ struct bpf_xfrm_info___local *to) __ksym;
+
+SEC("tc")
+int set_xfrm_info(struct __sk_buff *skb)
+{
+ struct bpf_xfrm_info___local info = { .if_id = req_if_id };
+
+ return bpf_skb_set_xfrm_info(skb, &info) ? TC_ACT_SHOT : TC_ACT_UNSPEC;
+}
+
+SEC("tc")
+int get_xfrm_info(struct __sk_buff *skb)
+{
+ struct bpf_xfrm_info___local info = {};
+
+ if (bpf_skb_get_xfrm_info(skb, &info) < 0)
+ return TC_ACT_SHOT;
+
+ resp_if_id = info.if_id;
+
+ return TC_ACT_UNSPEC;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
new file mode 100644
index 000000000000..683306db8594
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Intel */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/errno.h>
+#include "xsk_xdp_common.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_XSKMAP);
+ __uint(max_entries, 2);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+} xsk SEC(".maps");
+
+static unsigned int idx;
+int adjust_value = 0;
+int count = 0;
+
+SEC("xdp.frags") int xsk_def_prog(struct xdp_md *xdp)
+{
+ return bpf_redirect_map(&xsk, 0, XDP_DROP);
+}
+
+SEC("xdp.frags") int xsk_xdp_drop(struct xdp_md *xdp)
+{
+ /* Drop every other packet */
+ if (idx++ % 2)
+ return XDP_DROP;
+
+ return bpf_redirect_map(&xsk, 0, XDP_DROP);
+}
+
+SEC("xdp.frags") int xsk_xdp_populate_metadata(struct xdp_md *xdp)
+{
+ void *data, *data_meta;
+ struct xdp_info *meta;
+ int err;
+
+ /* Reserve enough for all custom metadata. */
+ err = bpf_xdp_adjust_meta(xdp, -(int)sizeof(struct xdp_info));
+ if (err)
+ return XDP_DROP;
+
+ data = (void *)(long)xdp->data;
+ data_meta = (void *)(long)xdp->data_meta;
+
+ if (data_meta + sizeof(struct xdp_info) > data)
+ return XDP_DROP;
+
+ meta = data_meta;
+ meta->count = count++;
+
+ return bpf_redirect_map(&xsk, 0, XDP_DROP);
+}
+
+SEC("xdp") int xsk_xdp_shared_umem(struct xdp_md *xdp)
+{
+ void *data = (void *)(long)xdp->data;
+ void *data_end = (void *)(long)xdp->data_end;
+ struct ethhdr *eth = data;
+
+ if (eth + 1 > data_end)
+ return XDP_DROP;
+
+ /* Redirect based on the last byte of the destination MAC address */
+ idx = ((unsigned int)(eth->h_dest[5])) / 2;
+ if (idx > MAX_SOCKETS)
+ return XDP_DROP;
+
+ return bpf_redirect_map(&xsk, idx, XDP_DROP);
+}
+
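+/* Adjust the frame tail by the adjust_value global, verify the new buffer
+ * length and, when the packet grows, store a sequence number in the new tail
+ * bytes.
+ */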
+SEC("xdp.frags") int xsk_xdp_adjust_tail(struct xdp_md *xdp)
+{
+ __u32 buff_len, curr_buff_len;
+ int ret;
+
+ buff_len = bpf_xdp_get_buff_len(xdp);
+ if (buff_len == 0)
+ return XDP_DROP;
+
+ ret = bpf_xdp_adjust_tail(xdp, adjust_value);
+ if (ret < 0) {
+ /* Handle unsupported cases */
+ if (ret == -EOPNOTSUPP) {
+ /* Set adjust_value to -EOPNOTSUPP to indicate to userspace that this case
+ * is unsupported
+ */
+ adjust_value = -EOPNOTSUPP;
+ return bpf_redirect_map(&xsk, 0, XDP_DROP);
+ }
+
+ return XDP_DROP;
+ }
+
+ curr_buff_len = bpf_xdp_get_buff_len(xdp);
+ if (curr_buff_len != buff_len + adjust_value)
+ return XDP_DROP;
+
+ if (curr_buff_len > buff_len) {
+ __u32 *pkt_data = (void *)(long)xdp->data;
+ __u32 len, words_to_end, seq_num;
+
+ len = curr_buff_len - PKT_HDR_ALIGN;
+ words_to_end = len / sizeof(*pkt_data) - 1;
+ seq_num = words_to_end;
+
+ /* Convert the sequence number to network byte order and store it in the
+ * last 4 bytes of the packet, i.e. at offset curr_buff_len - sizeof(seq_num).
+ */
+ seq_num = __constant_htonl(words_to_end);
+ bpf_xdp_store_bytes(xdp, curr_buff_len - sizeof(seq_num), &seq_num,
+ sizeof(seq_num));
+ }
+
+ return bpf_redirect_map(&xsk, 0, XDP_DROP);
+}
+
+char _license[] SEC("license") = "GPL";