Diffstat (limited to 'tools')
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-gen.rst | 25
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool.rst | 13
-rw-r--r--  tools/bpf/bpftool/bash-completion/bpftool | 14
-rw-r--r--  tools/bpf/bpftool/common.c | 2
-rw-r--r--  tools/bpf/bpftool/feature.c | 152
-rw-r--r--  tools/bpf/bpftool/gen.c | 587
-rw-r--r--  tools/bpf/bpftool/main.h | 2
-rw-r--r--  tools/bpf/bpftool/map.c | 9
-rw-r--r--  tools/bpf/bpftool/pids.c | 8
-rw-r--r--  tools/bpf/bpftool/prog.c | 2
-rw-r--r--  tools/bpf/bpftool/skeleton/pid_iter.bpf.c | 22
-rw-r--r--  tools/bpf/bpftool/skeleton/pid_iter.h | 2
-rw-r--r--  tools/include/uapi/linux/bpf.h | 72
-rw-r--r--  tools/lib/bpf/bpf.c | 13
-rw-r--r--  tools/lib/bpf/bpf.h | 12
-rw-r--r--  tools/lib/bpf/libbpf.c | 720
-rw-r--r--  tools/lib/bpf/libbpf.h | 161
-rw-r--r--  tools/lib/bpf/libbpf.map | 9
-rw-r--r--  tools/lib/bpf/libbpf_internal.h | 5
-rw-r--r--  tools/lib/bpf/libbpf_legacy.h | 4
-rw-r--r--  tools/lib/bpf/libbpf_version.h | 2
-rw-r--r--  tools/lib/bpf/xsk.c | 15
-rw-r--r--  tools/scripts/Makefile.include | 4
-rw-r--r--  tools/testing/selftests/bpf/.gitignore | 1
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 20
-rw-r--r--  tools/testing/selftests/bpf/README.rst | 10
-rw-r--r--  tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 14
-rw-r--r--  tools/testing/selftests/bpf/cap_helpers.c | 67
-rw-r--r--  tools/testing/selftests/bpf/cap_helpers.h | 19
-rwxr-xr-x  tools/testing/selftests/bpf/ima_setup.sh | 35
-rw-r--r--  tools/testing/selftests/bpf/network_helpers.c | 86
-rw-r--r--  tools/testing/selftests/bpf/network_helpers.h | 9
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bind_perm.c | 44
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_cookie.c | 179
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf_tag.c | 164
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c | 176
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/find_vma.c | 30
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/global_data.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c | 323
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/obj_name.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/perf_branches.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/perf_link.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/send_signal.c | 17
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c | 63
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/subprogs.c | 77
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/subskeleton.c | 78
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tc_redirect.c | 89
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_ima.c | 149
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c | 201
-rw-r--r--  tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c | 66
-rw-r--r--  tools/testing/selftests/bpf/progs/ima.c | 66
-rw-r--r--  tools/testing/selftests/bpf/progs/kprobe_multi.c | 100
-rw-r--r--  tools/testing/selftests/bpf/progs/local_storage.c | 19
-rw-r--r--  tools/testing/selftests/bpf/progs/stacktrace_map_skip.c | 68
-rw-r--r--  tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c | 63
-rw-r--r--  tools/testing/selftests/bpf/progs/test_send_signal_kern.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sk_lookup.c | 13
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sock_fields.c | 24
-rw-r--r--  tools/testing/selftests/bpf/progs/test_subskeleton.c | 28
-rw-r--r--  tools/testing/selftests/bpf/progs/test_subskeleton_lib.c | 61
-rw-r--r--  tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c | 16
-rw-r--r--  tools/testing/selftests/bpf/progs/test_tc_dtime.c | 38
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c | 100
-rw-r--r--  tools/testing/selftests/bpf/test_cgroup_storage.c | 2
-rwxr-xr-x  tools/testing/selftests/bpf/test_lirc_mode2.sh | 5
-rw-r--r--  tools/testing/selftests/bpf/test_lru_map.c | 4
-rwxr-xr-x  tools/testing/selftests/bpf/test_lwt_ip_encap.sh | 10
-rw-r--r--  tools/testing/selftests/bpf/test_sock_addr.c | 6
-rw-r--r--  tools/testing/selftests/bpf/test_sockmap.c | 4
-rwxr-xr-x  tools/testing/selftests/bpf/test_tunnel.sh | 2
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 88
-rw-r--r--  tools/testing/selftests/bpf/trace_helpers.c | 7
-rw-r--r--  tools/testing/selftests/bpf/verifier/bounds_deduction.c | 2
-rw-r--r--  tools/testing/selftests/bpf/verifier/calls.c | 83
-rw-r--r--  tools/testing/selftests/bpf/verifier/ctx.c | 8
78 files changed, 4011 insertions(+), 600 deletions(-)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
index 18d646b571ec..68454ef28f58 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-gen.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
@@ -25,6 +25,7 @@ GEN COMMANDS
| **bpftool** **gen object** *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...]
| **bpftool** **gen skeleton** *FILE* [**name** *OBJECT_NAME*]
+| **bpftool** **gen subskeleton** *FILE* [**name** *OBJECT_NAME*]
| **bpftool** **gen min_core_btf** *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...]
| **bpftool** **gen help**
@@ -150,6 +151,30 @@ DESCRIPTION
(non-read-only) data from userspace, with same simplicity
as for BPF side.
+ **bpftool gen subskeleton** *FILE*
+ Generate BPF subskeleton C header file for a given *FILE*.
+
+ Subskeletons are similar to skeletons, except they do not own
+ the corresponding maps, programs, or global variables. They
+ require that the object file used to generate them is already
+ loaded into a *bpf_object* by some other means.
+
+ This functionality is useful when a library is included into a
+ larger BPF program. A subskeleton for the library would have
+ access to all objects and globals defined in it, without
+ having to know about the larger program.
+
+ Consequently, there are only two functions defined
+ for subskeletons:
+
+ - **example__open(bpf_object\*)**
+ Instantiates a subskeleton from an already opened (but not
+ necessarily loaded) **bpf_object**.
+
+ - **example__destroy()**
+ Frees the storage for the subskeleton but *does not* unload
+ any BPF programs or maps.
+
**bpftool** **gen min_core_btf** *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...]
Generate a minimum BTF file as *OUTPUT*, derived from a given
*INPUT* BTF file, containing all needed BTF types so one, or
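
As a rough usage sketch for the subskeleton command documented above (not part of the patch; object, map and variable names are illustrative), a header generated with "bpftool gen subskeleton lib.bpf.o name example" might be consumed like this:

/* Hedged sketch: lib.bpf.o is assumed to have been linked into a larger BPF
 * object that the caller has already opened; example.subskel.h is the
 * generated header and lib_counter an illustrative global in .data.
 */
#include <bpf/libbpf.h>
#include "example.subskel.h"

static int use_library_globals(struct bpf_object *obj)
{
	struct example *lib = example__open(obj);

	if (!lib)
		return -1;

	/* globals are reached through per-datasec pointers resolved at open time */
	*lib->data.lib_counter += 1;

	example__destroy(lib);	/* frees the subskeleton only, not maps or programs */
	return 0;
}
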
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index 7084dd9fa2f8..6965c94dfdaf 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -20,7 +20,8 @@ SYNOPSIS
**bpftool** **version**
- *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** | **feature** }
+ *OBJECT* := { **map** | **program** | **link** | **cgroup** | **perf** | **net** | **feature** |
+ **btf** | **gen** | **struct_ops** | **iter** }
*OPTIONS* := { { **-V** | **--version** } | |COMMON_OPTIONS| }
@@ -31,6 +32,8 @@ SYNOPSIS
*PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin** |
**load** | **attach** | **detach** | **help** }
+ *LINK-COMMANDS* := { **show** | **list** | **pin** | **detach** | **help** }
+
*CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** }
*PERF-COMMANDS* := { **show** | **list** | **help** }
@@ -39,6 +42,14 @@ SYNOPSIS
*FEATURE-COMMANDS* := { **probe** | **help** }
+ *BTF-COMMANDS* := { **show** | **list** | **dump** | **help** }
+
+ *GEN-COMMANDS* := { **object** | **skeleton** | **min_core_btf** | **help** }
+
+ *STRUCT-OPS-COMMANDS* := { **show** | **list** | **dump** | **register** | **unregister** | **help** }
+
+ *ITER-COMMANDS* := { **pin** | **help** }
+
DESCRIPTION
===========
*bpftool* allows for inspection and simple modification of BPF objects
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 958e1fd71b5c..5df8d72c5179 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -1003,13 +1003,25 @@ _bpftool()
;;
esac
;;
+ subskeleton)
+ case $prev in
+ $command)
+ _filedir
+ return 0
+ ;;
+ *)
+ _bpftool_once_attr 'name'
+ return 0
+ ;;
+ esac
+ ;;
min_core_btf)
_filedir
return 0
;;
*)
[[ $prev == $object ]] && \
- COMPREPLY=( $( compgen -W 'object skeleton help min_core_btf' -- "$cur" ) )
+ COMPREPLY=( $( compgen -W 'object skeleton subskeleton help min_core_btf' -- "$cur" ) )
;;
esac
;;
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 606743c6db41..0c1e06cf50b9 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -56,7 +56,6 @@ const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = {
[BPF_CGROUP_UDP6_RECVMSG] = "recvmsg6",
[BPF_CGROUP_GETSOCKOPT] = "getsockopt",
[BPF_CGROUP_SETSOCKOPT] = "setsockopt",
-
[BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser",
[BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict",
[BPF_SK_SKB_VERDICT] = "sk_skb_verdict",
@@ -76,6 +75,7 @@ const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = {
[BPF_SK_REUSEPORT_SELECT] = "sk_skb_reuseport_select",
[BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_skb_reuseport_select_or_migrate",
[BPF_PERF_EVENT] = "perf_event",
+ [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi",
};
void p_err(const char *fmt, ...)
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 9c894b1447de..c2f43a5d38e0 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -3,6 +3,7 @@
#include <ctype.h>
#include <errno.h>
+#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
@@ -45,6 +46,11 @@ static bool run_as_unprivileged;
/* Miscellaneous utility functions */
+static bool grep(const char *buffer, const char *pattern)
+{
+ return !!strstr(buffer, pattern);
+}
+
static bool check_procfs(void)
{
struct statfs st_fs;
@@ -135,6 +141,32 @@ static void print_end_section(void)
/* Probing functions */
+static int get_vendor_id(int ifindex)
+{
+ char ifname[IF_NAMESIZE], path[64], buf[8];
+ ssize_t len;
+ int fd;
+
+ if (!if_indextoname(ifindex, ifname))
+ return -1;
+
+ snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);
+
+ fd = open(path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0)
+ return -1;
+
+ len = read(fd, buf, sizeof(buf));
+ close(fd);
+ if (len < 0)
+ return -1;
+ if (len >= (ssize_t)sizeof(buf))
+ return -1;
+ buf[len] = '\0';
+
+ return strtol(buf, NULL, 0);
+}
+
static int read_procfs(const char *path)
{
char *endptr, *line = NULL;
@@ -478,6 +510,40 @@ static bool probe_bpf_syscall(const char *define_prefix)
return res;
}
+static bool
+probe_prog_load_ifindex(enum bpf_prog_type prog_type,
+ const struct bpf_insn *insns, size_t insns_cnt,
+ char *log_buf, size_t log_buf_sz,
+ __u32 ifindex)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .log_buf = log_buf,
+ .log_size = log_buf_sz,
+ .log_level = log_buf ? 1 : 0,
+ .prog_ifindex = ifindex,
+ );
+ int fd;
+
+ errno = 0;
+ fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts);
+ if (fd >= 0)
+ close(fd);
+
+ return fd >= 0 && errno != EINVAL && errno != EOPNOTSUPP;
+}
+
+static bool probe_prog_type_ifindex(enum bpf_prog_type prog_type, __u32 ifindex)
+{
+ /* nfp returns -EINVAL on exit(0) with TC offload */
+ struct bpf_insn insns[2] = {
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN()
+ };
+
+ return probe_prog_load_ifindex(prog_type, insns, ARRAY_SIZE(insns),
+ NULL, 0, ifindex);
+}
+
static void
probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
const char *define_prefix, __u32 ifindex)
@@ -488,11 +554,19 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
bool res;
if (ifindex) {
- p_info("BPF offload feature probing is not supported");
- return;
+ switch (prog_type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_XDP:
+ break;
+ default:
+ return;
+ }
+
+ res = probe_prog_type_ifindex(prog_type, ifindex);
+ } else {
+ res = libbpf_probe_bpf_prog_type(prog_type, NULL);
}
- res = libbpf_probe_bpf_prog_type(prog_type, NULL);
#ifdef USE_LIBCAP
/* Probe may succeed even if program load fails, for unprivileged users
* check that we did not fail because of insufficient permissions
@@ -521,6 +595,26 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
define_prefix);
}
+static bool probe_map_type_ifindex(enum bpf_map_type map_type, __u32 ifindex)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, opts);
+ int key_size, value_size, max_entries;
+ int fd;
+
+ opts.map_ifindex = ifindex;
+
+ key_size = sizeof(__u32);
+ value_size = sizeof(__u32);
+ max_entries = 1;
+
+ fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries,
+ &opts);
+ if (fd >= 0)
+ close(fd);
+
+ return fd >= 0;
+}
+
static void
probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
__u32 ifindex)
@@ -531,11 +625,18 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
bool res;
if (ifindex) {
- p_info("BPF offload feature probing is not supported");
- return;
- }
+ switch (map_type) {
+ case BPF_MAP_TYPE_HASH:
+ case BPF_MAP_TYPE_ARRAY:
+ break;
+ default:
+ return;
+ }
- res = libbpf_probe_bpf_map_type(map_type, NULL);
+ res = probe_map_type_ifindex(map_type, ifindex);
+ } else {
+ res = libbpf_probe_bpf_map_type(map_type, NULL);
+ }
/* Probe result depends on the success of map creation, no additional
* check required for unprivileged users
@@ -559,6 +660,33 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
define_prefix);
}
+static bool
+probe_helper_ifindex(enum bpf_func_id id, enum bpf_prog_type prog_type,
+ __u32 ifindex)
+{
+ struct bpf_insn insns[2] = {
+ BPF_EMIT_CALL(id),
+ BPF_EXIT_INSN()
+ };
+ char buf[4096] = {};
+ bool res;
+
+ probe_prog_load_ifindex(prog_type, insns, ARRAY_SIZE(insns), buf,
+ sizeof(buf), ifindex);
+ res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");
+
+ switch (get_vendor_id(ifindex)) {
+ case 0x19ee: /* Netronome specific */
+ res = res && !grep(buf, "not supported by FW") &&
+ !grep(buf, "unsupported function id");
+ break;
+ default:
+ break;
+ }
+
+ return res;
+}
+
static void
probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
const char *define_prefix, unsigned int id,
@@ -567,12 +695,10 @@ probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
bool res = false;
if (supported_type) {
- if (ifindex) {
- p_info("BPF offload feature probing is not supported");
- return;
- }
-
- res = libbpf_probe_bpf_helper(prog_type, id, NULL);
+ if (ifindex)
+ res = probe_helper_ifindex(id, prog_type, ifindex);
+ else
+ res = libbpf_probe_bpf_helper(prog_type, id, NULL);
#ifdef USE_LIBCAP
/* Probe may succeed even if program load fails, for
* unprivileged users check that we did not fail because of
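
With the p_info() early returns replaced by the real ifindex-based probes above, device feature probing works again for offload-capable drivers; for example, an invocation along the lines of "bpftool feature probe dev eth0" (device name is a placeholder) exercises the SCHED_CLS/XDP program and HASH/ARRAY map paths added here.
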
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 145734b4fe41..7ba7ff55d2ea 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -64,11 +64,11 @@ static void get_obj_name(char *name, const char *file)
sanitize_identifier(name);
}
-static void get_header_guard(char *guard, const char *obj_name)
+static void get_header_guard(char *guard, const char *obj_name, const char *suffix)
{
int i;
- sprintf(guard, "__%s_SKEL_H__", obj_name);
+ sprintf(guard, "__%s_%s__", obj_name, suffix);
for (i = 0; guard[i]; i++)
guard[i] = toupper(guard[i]);
}
@@ -231,6 +231,17 @@ static const struct btf_type *find_type_for_map(struct btf *btf, const char *map
return NULL;
}
+static bool is_internal_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
+{
+ if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
+ return false;
+
+ if (!get_map_ident(map, buf, sz))
+ return false;
+
+ return true;
+}
+
static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
struct btf *btf = bpf_object__btf(obj);
@@ -247,12 +258,7 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
bpf_object__for_each_map(map, obj) {
/* only generate definitions for memory-mapped internal maps */
- if (!bpf_map__is_internal(map))
- continue;
- if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
- continue;
-
- if (!get_map_ident(map, map_ident, sizeof(map_ident)))
+ if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
continue;
sec = find_type_for_map(btf, map_ident);
@@ -280,6 +286,96 @@ out:
return err;
}
+static bool btf_is_ptr_to_func_proto(const struct btf *btf,
+ const struct btf_type *v)
+{
+ return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type));
+}
+
+static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name)
+{
+ struct btf *btf = bpf_object__btf(obj);
+ struct btf_dump *d;
+ struct bpf_map *map;
+ const struct btf_type *sec, *var;
+ const struct btf_var_secinfo *sec_var;
+ int i, err = 0, vlen;
+ char map_ident[256], sec_ident[256];
+ bool strip_mods = false, needs_typeof = false;
+ const char *sec_name, *var_name;
+ __u32 var_type_id;
+
+ d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
+ if (!d)
+ return -errno;
+
+ bpf_object__for_each_map(map, obj) {
+ /* only generate definitions for memory-mapped internal maps */
+ if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
+ continue;
+
+ sec = find_type_for_map(btf, map_ident);
+ if (!sec)
+ continue;
+
+ sec_name = btf__name_by_offset(btf, sec->name_off);
+ if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
+ continue;
+
+ strip_mods = strcmp(sec_name, ".kconfig") != 0;
+ printf(" struct %s__%s {\n", obj_name, sec_ident);
+
+ sec_var = btf_var_secinfos(sec);
+ vlen = btf_vlen(sec);
+ for (i = 0; i < vlen; i++, sec_var++) {
+ DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
+ .indent_level = 2,
+ .strip_mods = strip_mods,
+ /* we'll print the name separately */
+ .field_name = "",
+ );
+
+ var = btf__type_by_id(btf, sec_var->type);
+ var_name = btf__name_by_offset(btf, var->name_off);
+ var_type_id = var->type;
+
+ /* static variables are not exposed through BPF skeleton */
+ if (btf_var(var)->linkage == BTF_VAR_STATIC)
+ continue;
+
+ /* The datasec member has KIND_VAR but we want the
+ * underlying type of the variable (e.g. KIND_INT).
+ */
+ var = skip_mods_and_typedefs(btf, var->type, NULL);
+
+ printf("\t\t");
+ /* Func and array members require special handling.
+ * Instead of producing `typename *var`, they produce
+ * `typeof(typename) *var`. This allows us to keep a
+ * similar syntax where the identifier is just prefixed
+ * by *, allowing us to ignore C declaration minutiae.
+ */
+ needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var);
+ if (needs_typeof)
+ printf("typeof(");
+
+ err = btf_dump__emit_type_decl(d, var_type_id, &opts);
+ if (err)
+ goto out;
+
+ if (needs_typeof)
+ printf(")");
+
+ printf(" *%s;\n", var_name);
+ }
+ printf(" } %s;\n", sec_ident);
+ }
+
+out:
+ btf_dump__free(d);
+ return err;
+}
+
static void codegen(const char *template, ...)
{
const char *src, *end;
@@ -389,11 +485,7 @@ static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
", obj_name);
bpf_object__for_each_map(map, obj) {
- if (!bpf_map__is_internal(map))
- continue;
- if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
- continue;
- if (!get_map_ident(map, map_ident, sizeof(map_ident)))
+ if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
continue;
sec = find_type_for_map(btf, map_ident);
@@ -608,11 +700,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
const void *mmap_data = NULL;
size_t mmap_size = 0;
- if (!get_map_ident(map, ident, sizeof(ident)))
- continue;
-
- if (!bpf_map__is_internal(map) ||
- !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
+ if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
continue;
codegen("\
@@ -671,11 +759,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
bpf_object__for_each_map(map, obj) {
const char *mmap_flags;
- if (!get_map_ident(map, ident, sizeof(ident)))
- continue;
-
- if (!bpf_map__is_internal(map) ||
- !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
+ if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
continue;
if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
@@ -727,10 +811,95 @@ out:
return err;
}
+static void
+codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
+{
+ struct bpf_map *map;
+ char ident[256];
+ size_t i;
+
+ if (!map_cnt)
+ return;
+
+ codegen("\
+ \n\
+ \n\
+ /* maps */ \n\
+ s->map_cnt = %zu; \n\
+ s->map_skel_sz = sizeof(*s->maps); \n\
+ s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
+ if (!s->maps) \n\
+ goto err; \n\
+ ",
+ map_cnt
+ );
+ i = 0;
+ bpf_object__for_each_map(map, obj) {
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+
+ codegen("\
+ \n\
+ \n\
+ s->maps[%zu].name = \"%s\"; \n\
+ s->maps[%zu].map = &obj->maps.%s; \n\
+ ",
+ i, bpf_map__name(map), i, ident);
+ /* memory-mapped internal maps */
+ if (mmaped && is_internal_mmapable_map(map, ident, sizeof(ident))) {
+ printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
+ i, ident);
+ }
+ i++;
+ }
+}
+
+static void
+codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links)
+{
+ struct bpf_program *prog;
+ int i;
+
+ if (!prog_cnt)
+ return;
+
+ codegen("\
+ \n\
+ \n\
+ /* programs */ \n\
+ s->prog_cnt = %zu; \n\
+ s->prog_skel_sz = sizeof(*s->progs); \n\
+ s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
+ if (!s->progs) \n\
+ goto err; \n\
+ ",
+ prog_cnt
+ );
+ i = 0;
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ \n\
+ s->progs[%1$zu].name = \"%2$s\"; \n\
+ s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
+ ",
+ i, bpf_program__name(prog));
+
+ if (populate_links) {
+ codegen("\
+ \n\
+ s->progs[%1$zu].link = &obj->links.%2$s;\n\
+ ",
+ i, bpf_program__name(prog));
+ }
+ i++;
+ }
+}
+
static int do_skeleton(int argc, char **argv)
{
char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
- size_t i, map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
+ size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
struct bpf_object *obj = NULL;
@@ -821,7 +990,7 @@ static int do_skeleton(int argc, char **argv)
prog_cnt++;
}
- get_header_guard(header_guard, obj_name);
+ get_header_guard(header_guard, obj_name, "SKEL_H");
if (use_loader) {
codegen("\
\n\
@@ -1024,66 +1193,10 @@ static int do_skeleton(int argc, char **argv)
",
obj_name
);
- if (map_cnt) {
- codegen("\
- \n\
- \n\
- /* maps */ \n\
- s->map_cnt = %zu; \n\
- s->map_skel_sz = sizeof(*s->maps); \n\
- s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
- if (!s->maps) \n\
- goto err; \n\
- ",
- map_cnt
- );
- i = 0;
- bpf_object__for_each_map(map, obj) {
- if (!get_map_ident(map, ident, sizeof(ident)))
- continue;
- codegen("\
- \n\
- \n\
- s->maps[%zu].name = \"%s\"; \n\
- s->maps[%zu].map = &obj->maps.%s; \n\
- ",
- i, bpf_map__name(map), i, ident);
- /* memory-mapped internal maps */
- if (bpf_map__is_internal(map) &&
- (bpf_map__map_flags(map) & BPF_F_MMAPABLE)) {
- printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
- i, ident);
- }
- i++;
- }
- }
- if (prog_cnt) {
- codegen("\
- \n\
- \n\
- /* programs */ \n\
- s->prog_cnt = %zu; \n\
- s->prog_skel_sz = sizeof(*s->progs); \n\
- s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
- if (!s->progs) \n\
- goto err; \n\
- ",
- prog_cnt
- );
- i = 0;
- bpf_object__for_each_program(prog, obj) {
- codegen("\
- \n\
- \n\
- s->progs[%1$zu].name = \"%2$s\"; \n\
- s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
- s->progs[%1$zu].link = &obj->links.%2$s;\n\
- ",
- i, bpf_program__name(prog));
- i++;
- }
- }
+ codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
+ codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);
+
codegen("\
\n\
\n\
@@ -1141,6 +1254,310 @@ out:
return err;
}
+/* Subskeletons are like skeletons, except they don't own the bpf_object,
+ * associated maps, links, etc. Instead, they know about the existence of
+ * variables, maps, programs and are able to find their locations
+ * _at runtime_ from an already loaded bpf_object.
+ *
+ * This allows for library-like BPF objects to have userspace counterparts
+ * with access to their own items without having to know anything about the
+ * final BPF object that the library was linked into.
+ */
+static int do_subskeleton(int argc, char **argv)
+{
+ char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
+ size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
+ char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
+ struct bpf_object *obj = NULL;
+ const char *file, *var_name;
+ char ident[256];
+ int fd, err = -1, map_type_id;
+ const struct bpf_map *map;
+ struct bpf_program *prog;
+ struct btf *btf;
+ const struct btf_type *map_type, *var_type;
+ const struct btf_var_secinfo *var;
+ struct stat st;
+
+ if (!REQ_ARGS(1)) {
+ usage();
+ return -1;
+ }
+ file = GET_ARG();
+
+ while (argc) {
+ if (!REQ_ARGS(2))
+ return -1;
+
+ if (is_prefix(*argv, "name")) {
+ NEXT_ARG();
+
+ if (obj_name[0] != '\0') {
+ p_err("object name already specified");
+ return -1;
+ }
+
+ strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
+ obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
+ } else {
+ p_err("unknown arg %s", *argv);
+ return -1;
+ }
+
+ NEXT_ARG();
+ }
+
+ if (argc) {
+ p_err("extra unknown arguments");
+ return -1;
+ }
+
+ if (use_loader) {
+ p_err("cannot use loader for subskeletons");
+ return -1;
+ }
+
+ if (stat(file, &st)) {
+ p_err("failed to stat() %s: %s", file, strerror(errno));
+ return -1;
+ }
+ file_sz = st.st_size;
+ mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
+ fd = open(file, O_RDONLY);
+ if (fd < 0) {
+ p_err("failed to open() %s: %s", file, strerror(errno));
+ return -1;
+ }
+ obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (obj_data == MAP_FAILED) {
+ obj_data = NULL;
+ p_err("failed to mmap() %s: %s", file, strerror(errno));
+ goto out;
+ }
+ if (obj_name[0] == '\0')
+ get_obj_name(obj_name, file);
+
+ /* The empty object name allows us to use bpf_map__name and produce
+ * ELF section names out of it. (".data" instead of "obj.data")
+ */
+ opts.object_name = "";
+ obj = bpf_object__open_mem(obj_data, file_sz, &opts);
+ if (!obj) {
+ char err_buf[256];
+
+ libbpf_strerror(errno, err_buf, sizeof(err_buf));
+ p_err("failed to open BPF object file: %s", err_buf);
+ obj = NULL;
+ goto out;
+ }
+
+ btf = bpf_object__btf(obj);
+ if (!btf) {
+ err = -1;
+ p_err("need btf type information for %s", obj_name);
+ goto out;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ prog_cnt++;
+ }
+
+ /* First, count how many variables we have to find.
+ * We need this in advance so the subskel can allocate the right
+ * amount of storage.
+ */
+ bpf_object__for_each_map(map, obj) {
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+
+ /* Also count all maps that have a name */
+ map_cnt++;
+
+ if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
+ continue;
+
+ map_type_id = bpf_map__btf_value_type_id(map);
+ if (map_type_id <= 0) {
+ err = map_type_id;
+ goto out;
+ }
+ map_type = btf__type_by_id(btf, map_type_id);
+
+ var = btf_var_secinfos(map_type);
+ len = btf_vlen(map_type);
+ for (i = 0; i < len; i++, var++) {
+ var_type = btf__type_by_id(btf, var->type);
+
+ if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
+ continue;
+
+ var_cnt++;
+ }
+ }
+
+ get_header_guard(header_guard, obj_name, "SUBSKEL_H");
+ codegen("\
+ \n\
+ /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
+ \n\
+ /* THIS FILE IS AUTOGENERATED! */ \n\
+ #ifndef %2$s \n\
+ #define %2$s \n\
+ \n\
+ #include <errno.h> \n\
+ #include <stdlib.h> \n\
+ #include <bpf/libbpf.h> \n\
+ \n\
+ struct %1$s { \n\
+ struct bpf_object *obj; \n\
+ struct bpf_object_subskeleton *subskel; \n\
+ ", obj_name, header_guard);
+
+ if (map_cnt) {
+ printf("\tstruct {\n");
+ bpf_object__for_each_map(map, obj) {
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+ printf("\t\tstruct bpf_map *%s;\n", ident);
+ }
+ printf("\t} maps;\n");
+ }
+
+ if (prog_cnt) {
+ printf("\tstruct {\n");
+ bpf_object__for_each_program(prog, obj) {
+ printf("\t\tstruct bpf_program *%s;\n",
+ bpf_program__name(prog));
+ }
+ printf("\t} progs;\n");
+ }
+
+ err = codegen_subskel_datasecs(obj, obj_name);
+ if (err)
+ goto out;
+
+ /* emit code that will allocate enough storage for all symbols */
+ codegen("\
+ \n\
+ \n\
+ #ifdef __cplusplus \n\
+ static inline struct %1$s *open(const struct bpf_object *src);\n\
+ static inline void destroy(struct %1$s *skel); \n\
+ #endif /* __cplusplus */ \n\
+ }; \n\
+ \n\
+ static inline void \n\
+ %1$s__destroy(struct %1$s *skel) \n\
+ { \n\
+ if (!skel) \n\
+ return; \n\
+ if (skel->subskel) \n\
+ bpf_object__destroy_subskeleton(skel->subskel);\n\
+ free(skel); \n\
+ } \n\
+ \n\
+ static inline struct %1$s * \n\
+ %1$s__open(const struct bpf_object *src) \n\
+ { \n\
+ struct %1$s *obj; \n\
+ struct bpf_object_subskeleton *s; \n\
+ int err; \n\
+ \n\
+ obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
+ if (!obj) { \n\
+ errno = ENOMEM; \n\
+ goto err; \n\
+ } \n\
+ s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
+ if (!s) { \n\
+ errno = ENOMEM; \n\
+ goto err; \n\
+ } \n\
+ s->sz = sizeof(*s); \n\
+ s->obj = src; \n\
+ s->var_skel_sz = sizeof(*s->vars); \n\
+ obj->subskel = s; \n\
+ \n\
+ /* vars */ \n\
+ s->var_cnt = %2$d; \n\
+ s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
+ if (!s->vars) { \n\
+ errno = ENOMEM; \n\
+ goto err; \n\
+ } \n\
+ ",
+ obj_name, var_cnt
+ );
+
+ /* walk through each symbol and emit the runtime representation */
+ bpf_object__for_each_map(map, obj) {
+ if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
+ continue;
+
+ map_type_id = bpf_map__btf_value_type_id(map);
+ if (map_type_id <= 0)
+			/* skip over internal maps with no type */
+ continue;
+
+ map_type = btf__type_by_id(btf, map_type_id);
+ var = btf_var_secinfos(map_type);
+ len = btf_vlen(map_type);
+ for (i = 0; i < len; i++, var++) {
+ var_type = btf__type_by_id(btf, var->type);
+ var_name = btf__name_by_offset(btf, var_type->name_off);
+
+ if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
+ continue;
+
+ /* Note that we use the dot prefix in .data as the
+ * field access operator i.e. maps%s becomes maps.data
+ */
+ codegen("\
+ \n\
+ \n\
+ s->vars[%3$d].name = \"%1$s\"; \n\
+ s->vars[%3$d].map = &obj->maps.%2$s; \n\
+ s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\
+ ", var_name, ident, var_idx);
+
+ var_idx++;
+ }
+ }
+
+ codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
+ codegen_progs_skeleton(obj, prog_cnt, false /*links*/);
+
+ codegen("\
+ \n\
+ \n\
+ err = bpf_object__open_subskeleton(s); \n\
+ if (err) \n\
+ goto err; \n\
+ \n\
+ return obj; \n\
+ err: \n\
+ %1$s__destroy(obj); \n\
+ return NULL; \n\
+ } \n\
+ \n\
+ #ifdef __cplusplus \n\
+ struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\
+ void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
+ #endif /* __cplusplus */ \n\
+ \n\
+ #endif /* %2$s */ \n\
+ ",
+ obj_name, header_guard);
+ err = 0;
+out:
+ bpf_object__close(obj);
+ if (obj_data)
+ munmap(obj_data, mmap_sz);
+ close(fd);
+ return err;
+}
+
static int do_object(int argc, char **argv)
{
struct bpf_linker *linker;
@@ -1192,6 +1609,7 @@ static int do_help(int argc, char **argv)
fprintf(stderr,
"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
" %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
+ " %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
" %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
" %1$s %2$s help\n"
"\n"
@@ -1788,6 +2206,7 @@ static int do_min_core_btf(int argc, char **argv)
static const struct cmd cmds[] = {
{ "object", do_object },
{ "skeleton", do_skeleton },
+ { "subskeleton", do_subskeleton },
{ "min_core_btf", do_min_core_btf},
{ "help", do_help },
{ 0 }
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 0468e5b24bd4..6e9277ffc68c 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -113,7 +113,9 @@ struct obj_ref {
struct obj_refs {
int ref_cnt;
+ bool has_bpf_cookie;
struct obj_ref *refs;
+ __u64 bpf_cookie;
};
struct btf;
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index e746642de292..c26378f20831 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -504,7 +504,7 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
jsonw_uint_field(json_wtr, "max_entries", info->max_entries);
if (memlock)
- jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
+ jsonw_int_field(json_wtr, "bytes_memlock", atoll(memlock));
free(memlock);
if (info->type == BPF_MAP_TYPE_PROG_ARRAY) {
@@ -620,17 +620,14 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
u32_as_hash_field(info->id))
printf("\n\tpinned %s", (char *)entry->value);
}
- printf("\n");
if (frozen_str) {
frozen = atoi(frozen_str);
free(frozen_str);
}
- if (!info->btf_id && !frozen)
- return 0;
-
- printf("\t");
+ if (info->btf_id || frozen)
+ printf("\n\t");
if (info->btf_id)
printf("btf_id %d", info->btf_id);
diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c
index 7c384d10e95f..bb6c969a114a 100644
--- a/tools/bpf/bpftool/pids.c
+++ b/tools/bpf/bpftool/pids.c
@@ -78,6 +78,8 @@ static void add_ref(struct hashmap *map, struct pid_iter_entry *e)
ref->pid = e->pid;
memcpy(ref->comm, e->comm, sizeof(ref->comm));
refs->ref_cnt = 1;
+ refs->has_bpf_cookie = e->has_bpf_cookie;
+ refs->bpf_cookie = e->bpf_cookie;
err = hashmap__append(map, u32_as_hash_field(e->id), refs);
if (err)
@@ -205,6 +207,9 @@ void emit_obj_refs_json(struct hashmap *map, __u32 id,
if (refs->ref_cnt == 0)
break;
+ if (refs->has_bpf_cookie)
+ jsonw_lluint_field(json_writer, "bpf_cookie", refs->bpf_cookie);
+
jsonw_name(json_writer, "pids");
jsonw_start_array(json_writer);
for (i = 0; i < refs->ref_cnt; i++) {
@@ -234,6 +239,9 @@ void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix)
if (refs->ref_cnt == 0)
break;
+ if (refs->has_bpf_cookie)
+ printf("\n\tbpf_cookie %llu", (unsigned long long) refs->bpf_cookie);
+
printf("%s", prefix);
for (i = 0; i < refs->ref_cnt; i++) {
struct obj_ref *ref = &refs->refs[i];
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 8a52eed19fa2..bc4e05542c2b 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -485,7 +485,7 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
memlock = get_fdinfo(fd, "memlock");
if (memlock)
- jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
+ jsonw_int_field(json_wtr, "bytes_memlock", atoll(memlock));
free(memlock);
if (info->nr_map_ids)
diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
index f70702fcb224..eb05ea53afb1 100644
--- a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
+++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
@@ -38,6 +38,17 @@ static __always_inline __u32 get_obj_id(void *ent, enum bpf_obj_type type)
}
}
+/* could be used only with BPF_LINK_TYPE_PERF_EVENT links */
+static __u64 get_bpf_cookie(struct bpf_link *link)
+{
+ struct bpf_perf_link *perf_link;
+ struct perf_event *event;
+
+ perf_link = container_of(link, struct bpf_perf_link, link);
+ event = BPF_CORE_READ(perf_link, perf_file, private_data);
+ return BPF_CORE_READ(event, bpf_cookie);
+}
+
SEC("iter/task_file")
int iter(struct bpf_iter__task_file *ctx)
{
@@ -69,8 +80,19 @@ int iter(struct bpf_iter__task_file *ctx)
if (file->f_op != fops)
return 0;
+ __builtin_memset(&e, 0, sizeof(e));
e.pid = task->tgid;
e.id = get_obj_id(file->private_data, obj_type);
+
+ if (obj_type == BPF_OBJ_LINK) {
+ struct bpf_link *link = (struct bpf_link *) file->private_data;
+
+ if (BPF_CORE_READ(link, type) == BPF_LINK_TYPE_PERF_EVENT) {
+ e.has_bpf_cookie = true;
+ e.bpf_cookie = get_bpf_cookie(link);
+ }
+ }
+
bpf_probe_read_kernel_str(&e.comm, sizeof(e.comm),
task->group_leader->comm);
bpf_seq_write(ctx->meta->seq, &e, sizeof(e));
diff --git a/tools/bpf/bpftool/skeleton/pid_iter.h b/tools/bpf/bpftool/skeleton/pid_iter.h
index 5692cf257adb..bbb570d4cca6 100644
--- a/tools/bpf/bpftool/skeleton/pid_iter.h
+++ b/tools/bpf/bpftool/skeleton/pid_iter.h
@@ -6,6 +6,8 @@
struct pid_iter_entry {
__u32 id;
int pid;
+ __u64 bpf_cookie;
+ bool has_bpf_cookie;
char comm[16];
};
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4eebea830613..7604e7d5438f 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -997,6 +997,7 @@ enum bpf_attach_type {
BPF_SK_REUSEPORT_SELECT,
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
BPF_PERF_EVENT,
+ BPF_TRACE_KPROBE_MULTI,
__MAX_BPF_ATTACH_TYPE
};
@@ -1011,6 +1012,7 @@ enum bpf_link_type {
BPF_LINK_TYPE_NETNS = 5,
BPF_LINK_TYPE_XDP = 6,
BPF_LINK_TYPE_PERF_EVENT = 7,
+ BPF_LINK_TYPE_KPROBE_MULTI = 8,
MAX_BPF_LINK_TYPE,
};
@@ -1118,6 +1120,11 @@ enum bpf_link_type {
*/
#define BPF_F_XDP_HAS_FRAGS (1U << 5)
+/* link_create.kprobe_multi.flags used in LINK_CREATE command for
+ * BPF_TRACE_KPROBE_MULTI attach type to create return probe.
+ */
+#define BPF_F_KPROBE_MULTI_RETURN (1U << 0)
+
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
* the following extensions:
*
@@ -1232,6 +1239,8 @@ enum {
/* If set, run the test on the cpu specified by bpf_attr.test.cpu */
#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
+/* If set, XDP frames will be transmitted after processing */
+#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1)
/* type for BPF_ENABLE_STATS */
enum bpf_stats_type {
@@ -1393,6 +1402,7 @@ union bpf_attr {
__aligned_u64 ctx_out;
__u32 flags;
__u32 cpu;
+ __u32 batch_size;
} test;
struct { /* anonymous struct used by BPF_*_GET_*_ID */
@@ -1472,6 +1482,13 @@ union bpf_attr {
*/
__u64 bpf_cookie;
} perf_event;
+ struct {
+ __u32 flags;
+ __u32 cnt;
+ __aligned_u64 syms;
+ __aligned_u64 addrs;
+ __aligned_u64 cookies;
+ } kprobe_multi;
};
} link_create;
@@ -2299,8 +2316,8 @@ union bpf_attr {
* Return
* The return value depends on the result of the test, and can be:
*
- * * 0, if current task belongs to the cgroup2.
- * * 1, if current task does not belong to the cgroup2.
+ * * 1, if current task belongs to the cgroup2.
+ * * 0, if current task does not belong to the cgroup2.
* * A negative error code, if an error occurred.
*
* long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
@@ -5087,23 +5104,22 @@ union bpf_attr {
* 0 on success, or a negative error in case of failure. On error
* *dst* buffer is zeroed out.
*
- * long bpf_skb_set_delivery_time(struct sk_buff *skb, u64 dtime, u32 dtime_type)
+ * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type)
* Description
- * Set a *dtime* (delivery time) to the __sk_buff->tstamp and also
- * change the __sk_buff->delivery_time_type to *dtime_type*.
- *
- * When setting a delivery time (non zero *dtime*) to
- * __sk_buff->tstamp, only BPF_SKB_DELIVERY_TIME_MONO *dtime_type*
- * is supported. It is the only delivery_time_type that will be
- * kept after bpf_redirect_*().
+ * Change the __sk_buff->tstamp_type to *tstamp_type*
+ * and set *tstamp* to the __sk_buff->tstamp together.
*
- * If there is no need to change the __sk_buff->delivery_time_type,
- * the delivery time can be directly written to __sk_buff->tstamp
+ * If there is no need to change the __sk_buff->tstamp_type,
+ * the tstamp value can be directly written to __sk_buff->tstamp
* instead.
*
- * *dtime* 0 and *dtime_type* BPF_SKB_DELIVERY_TIME_NONE
- * can be used to clear any delivery time stored in
- * __sk_buff->tstamp.
+ * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that
+ * will be kept during bpf_redirect_*(). A non zero
+ * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO
+ * *tstamp_type*.
+ *
+ * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used
+ * with a zero *tstamp*.
*
* Only IPv4 and IPv6 skb->protocol are supported.
*
@@ -5116,7 +5132,17 @@ union bpf_attr {
* Return
* 0 on success.
* **-EINVAL** for invalid input
- * **-EOPNOTSUPP** for unsupported delivery_time_type and protocol
+ * **-EOPNOTSUPP** for unsupported protocol
+ *
+ * long bpf_ima_file_hash(struct file *file, void *dst, u32 size)
+ * Description
+ * Returns a calculated IMA hash of the *file*.
+ * If the hash is larger than *size*, then only *size*
+ * bytes will be copied to *dst*
+ * Return
+ * The **hash_algo** is returned on success,
+ *		**-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
+ * invalid arguments are passed.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5311,7 +5337,8 @@ union bpf_attr {
FN(xdp_load_bytes), \
FN(xdp_store_bytes), \
FN(copy_from_user_task), \
- FN(skb_set_delivery_time), \
+ FN(skb_set_tstamp), \
+ FN(ima_file_hash), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -5502,9 +5529,12 @@ union { \
} __attribute__((aligned(8)))
enum {
- BPF_SKB_DELIVERY_TIME_NONE,
- BPF_SKB_DELIVERY_TIME_UNSPEC,
- BPF_SKB_DELIVERY_TIME_MONO,
+ BPF_SKB_TSTAMP_UNSPEC,
+ BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */
+ /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle,
+ * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC
+ * and try to deduce it by ingress, egress or skb->sk->sk_clockid.
+ */
};
/* user accessible mirror of in-kernel sk_buff.
@@ -5547,7 +5577,7 @@ struct __sk_buff {
__u32 gso_segs;
__bpf_md_ptr(struct bpf_sock *, sk);
__u32 gso_size;
- __u8 delivery_time_type;
+ __u8 tstamp_type;
__u32 :24; /* Padding, future use. */
__u64 hwtstamp;
};
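
For the renamed helper, a minimal tc-bpf sketch (assuming the usual vmlinux.h and bpf_helpers.h build setup; the delay and names are illustrative, not taken from this patch) could stamp an EDT-style mono delivery time as follows:

/* Hedged sketch: set a CLOCK_MONOTONIC-based delivery time on egress. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("tc")
int set_mono_delivery_time(struct __sk_buff *skb)
{
	__u64 delay_ns = 1000 * 1000;	/* arbitrary 1 ms */

	/* BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp_type preserved
	 * across bpf_redirect_*(), and must pair with a non-zero tstamp.
	 */
	bpf_skb_set_tstamp(skb, bpf_ktime_get_ns() + delay_ns,
			   BPF_SKB_TSTAMP_DELIVERY_MONO);
	return 0;
}

char _license[] SEC("license") = "GPL";
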
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 418b259166f8..cf27251adb92 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -29,6 +29,7 @@
#include <errno.h>
#include <linux/bpf.h>
#include <linux/filter.h>
+#include <linux/kernel.h>
#include <limits.h>
#include <sys/resource.h>
#include "bpf.h"
@@ -111,7 +112,7 @@ int probe_memcg_account(void)
BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
BPF_EXIT_INSN(),
};
- size_t insn_cnt = sizeof(insns) / sizeof(insns[0]);
+ size_t insn_cnt = ARRAY_SIZE(insns);
union bpf_attr attr;
int prog_fd;
@@ -853,6 +854,15 @@ int bpf_link_create(int prog_fd, int target_fd,
if (!OPTS_ZEROED(opts, perf_event))
return libbpf_err(-EINVAL);
break;
+ case BPF_TRACE_KPROBE_MULTI:
+ attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0);
+ attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0);
+ attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0));
+ attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0));
+ attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0));
+ if (!OPTS_ZEROED(opts, kprobe_multi))
+ return libbpf_err(-EINVAL);
+ break;
default:
if (!OPTS_ZEROED(opts, flags))
return libbpf_err(-EINVAL);
@@ -994,6 +1004,7 @@ int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
memset(&attr, 0, sizeof(attr));
attr.test.prog_fd = prog_fd;
+ attr.test.batch_size = OPTS_GET(opts, batch_size, 0);
attr.test.cpu = OPTS_GET(opts, cpu, 0);
attr.test.flags = OPTS_GET(opts, flags, 0);
attr.test.repeat = OPTS_GET(opts, repeat, 0);
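
The new batch_size plumbing pairs with BPF_F_TEST_XDP_LIVE_FRAMES from the UAPI update; a hedged user-space sketch (prog_fd and the packet buffer are assumed to come from elsewhere) might drive it like this:

/* Rough sketch only: run an XDP program against live frames in batches of 64. */
#include <bpf/bpf.h>

static int run_live_xdp(int prog_fd, void *pkt, __u32 pkt_len)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.batch_size = 64,
		.repeat = 1 << 20,
		.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
	);

	return bpf_prog_test_run_opts(prog_fd, &topts);
}
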
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 16b21757b8bf..f4b4afb6d4ba 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -413,10 +413,17 @@ struct bpf_link_create_opts {
struct {
__u64 bpf_cookie;
} perf_event;
+ struct {
+ __u32 flags;
+ __u32 cnt;
+ const char **syms;
+ const unsigned long *addrs;
+ const __u64 *cookies;
+ } kprobe_multi;
};
size_t :0;
};
-#define bpf_link_create_opts__last_field perf_event
+#define bpf_link_create_opts__last_field kprobe_multi.cookies
LIBBPF_API int bpf_link_create(int prog_fd, int target_fd,
enum bpf_attach_type attach_type,
@@ -512,8 +519,9 @@ struct bpf_test_run_opts {
__u32 duration; /* out: average per repetition in ns */
__u32 flags;
__u32 cpu;
+ __u32 batch_size;
};
-#define bpf_test_run_opts__last_field cpu
+#define bpf_test_run_opts__last_field batch_size
LIBBPF_API int bpf_prog_test_run_opts(int prog_fd,
struct bpf_test_run_opts *opts);
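
For illustration only (the symbols and prog_fd are placeholders; the program is assumed to be loaded with expected attach type BPF_TRACE_KPROBE_MULTI, e.g. from a SEC("kprobe.multi/...") section), the new kprobe_multi opts can be exercised directly through bpf_link_create():

/* Hedged sketch: attach one program to several kernel symbols at once. */
#include <bpf/bpf.h>

static int attach_many(int prog_fd)
{
	const char *syms[] = { "ksys_read", "ksys_write" };
	LIBBPF_OPTS(bpf_link_create_opts, opts,
		.kprobe_multi.syms = syms,
		.kprobe_multi.cnt = 2,
		/* set .kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN for kretprobes */
	);

	/* target_fd is unused for BPF_TRACE_KPROBE_MULTI */
	return bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
}
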
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 81bf01d67671..809fe209cdcc 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -201,12 +201,6 @@ struct reloc_desc {
};
};
-struct bpf_sec_def;
-
-typedef int (*init_fn_t)(struct bpf_program *prog, long cookie);
-typedef int (*preload_fn_t)(struct bpf_program *prog, struct bpf_prog_load_opts *opts, long cookie);
-typedef struct bpf_link *(*attach_fn_t)(const struct bpf_program *prog, long cookie);
-
/* stored as sec_def->cookie for all libbpf-supported SEC()s */
enum sec_def_flags {
SEC_NONE = 0,
@@ -234,14 +228,15 @@ enum sec_def_flags {
};
struct bpf_sec_def {
- const char *sec;
+ char *sec;
enum bpf_prog_type prog_type;
enum bpf_attach_type expected_attach_type;
long cookie;
+ int handler_id;
- init_fn_t init_fn;
- preload_fn_t preload_fn;
- attach_fn_t attach_fn;
+ libbpf_prog_setup_fn_t prog_setup_fn;
+ libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
+ libbpf_prog_attach_fn_t prog_attach_fn;
};
/*
@@ -1523,6 +1518,9 @@ static char *internal_map_name(struct bpf_object *obj, const char *real_name)
}
static int
+bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map);
+
+static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
const char *real_name, int sec_idx, void *data, size_t data_sz)
{
@@ -1569,6 +1567,9 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
return err;
}
+ /* failures are fine because of maps like .rodata.str1.1 */
+ (void) bpf_map_find_btf_info(obj, map);
+
if (data)
memcpy(map->mmaped, data, data_sz);
@@ -2051,6 +2052,9 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
}
memcpy(&map->def, def, sizeof(struct bpf_map_def));
}
+
+ /* btf info may not exist but fill it in if it does exist */
+ (void) bpf_map_find_btf_info(obj, map);
}
return 0;
}
@@ -2539,6 +2543,10 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
fill_map_from_def(map->inner_map, &inner_def);
}
+ err = bpf_map_find_btf_info(obj, map);
+ if (err)
+ return err;
+
return 0;
}
@@ -3837,7 +3845,14 @@ static bool prog_is_subprog(const struct bpf_object *obj,
* .text programs are subprograms (even if they are not called from
* other programs), because libbpf never explicitly supported mixing
* SEC()-designated BPF programs and .text entry-point BPF programs.
+ *
+ * In libbpf 1.0 strict mode, we always consider .text
+ * programs to be subprograms.
*/
+
+ if (libbpf_mode & LIBBPF_STRICT_SEC_NAME)
+ return prog->sec_idx == obj->efile.text_shndx;
+
return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
}
@@ -4182,6 +4197,9 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
__u32 key_type_id = 0, value_type_id = 0;
int ret;
+ if (!obj->btf)
+ return -ENOENT;
+
/* if it's BTF-defined map, we don't need to search for type IDs.
* For struct_ops map, it does not need btf_key_type_id and
* btf_value_type_id.
@@ -4805,8 +4823,8 @@ bpf_object__reuse_map(struct bpf_map *map)
}
err = bpf_map__reuse_fd(map, pin_fd);
+ close(pin_fd);
if (err) {
- close(pin_fd);
return err;
}
map->pinned = true;
@@ -4871,7 +4889,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
if (bpf_map__is_struct_ops(map))
create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
- if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
+ if (obj->btf && btf__fd(obj->btf) >= 0) {
create_attr.btf_fd = btf__fd(obj->btf);
create_attr.btf_key_type_id = map->btf_key_type_id;
create_attr.btf_value_type_id = map->btf_value_type_id;
@@ -6572,9 +6590,9 @@ static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program
static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
int *btf_obj_fd, int *btf_type_id);
-/* this is called as prog->sec_def->preload_fn for libbpf-supported sec_defs */
-static int libbpf_preload_prog(struct bpf_program *prog,
- struct bpf_prog_load_opts *opts, long cookie)
+/* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */
+static int libbpf_prepare_prog_load(struct bpf_program *prog,
+ struct bpf_prog_load_opts *opts, long cookie)
{
enum sec_def_flags def = cookie;
@@ -6670,8 +6688,8 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog
load_attr.fd_array = obj->fd_array;
/* adjust load_attr if sec_def provides custom preload callback */
- if (prog->sec_def && prog->sec_def->preload_fn) {
- err = prog->sec_def->preload_fn(prog, &load_attr, prog->sec_def->cookie);
+ if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
+ err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
if (err < 0) {
pr_warn("prog '%s': failed to prepare load attributes: %d\n",
prog->name, err);
@@ -6971,8 +6989,8 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
/* sec_def can have custom callback which should be called
* after bpf_program is initialized to adjust its properties
*/
- if (prog->sec_def->init_fn) {
- err = prog->sec_def->init_fn(prog, prog->sec_def->cookie);
+ if (prog->sec_def->prog_setup_fn) {
+ err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
if (err < 0) {
pr_warn("prog '%s': failed to initialize: %d\n",
prog->name, err);
@@ -7176,12 +7194,10 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj)
return 0;
}
-static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
+int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
{
char sym_type, sym_name[500];
unsigned long long sym_addr;
- const struct btf_type *t;
- struct extern_desc *ext;
int ret, err = 0;
FILE *f;
@@ -7200,35 +7216,51 @@ static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
if (ret != 3) {
pr_warn("failed to read kallsyms entry: %d\n", ret);
err = -EINVAL;
- goto out;
+ break;
}
- ext = find_extern_by_name(obj, sym_name);
- if (!ext || ext->type != EXT_KSYM)
- continue;
-
- t = btf__type_by_id(obj->btf, ext->btf_id);
- if (!btf_is_var(t))
- continue;
-
- if (ext->is_set && ext->ksym.addr != sym_addr) {
- pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
- sym_name, ext->ksym.addr, sym_addr);
- err = -EINVAL;
- goto out;
- }
- if (!ext->is_set) {
- ext->is_set = true;
- ext->ksym.addr = sym_addr;
- pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
- }
+ err = cb(sym_addr, sym_type, sym_name, ctx);
+ if (err)
+ break;
}
-out:
fclose(f);
return err;
}
+static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
+ const char *sym_name, void *ctx)
+{
+ struct bpf_object *obj = ctx;
+ const struct btf_type *t;
+ struct extern_desc *ext;
+
+ ext = find_extern_by_name(obj, sym_name);
+ if (!ext || ext->type != EXT_KSYM)
+ return 0;
+
+ t = btf__type_by_id(obj->btf, ext->btf_id);
+ if (!btf_is_var(t))
+ return 0;
+
+ if (ext->is_set && ext->ksym.addr != sym_addr) {
+ pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
+ sym_name, ext->ksym.addr, sym_addr);
+ return -EINVAL;
+ }
+ if (!ext->is_set) {
+ ext->is_set = true;
+ ext->ksym.addr = sym_addr;
+ pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
+ }
+ return 0;
+}
+
+static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
+{
+ return libbpf_kallsyms_parse(kallsyms_cb, obj);
+}
+
static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
__u16 kind, struct btf **res_btf,
struct module_btf **res_mod_btf)
@@ -8589,20 +8621,21 @@ int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log
}
#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \
- .sec = sec_pfx, \
+ .sec = (char *)sec_pfx, \
.prog_type = BPF_PROG_TYPE_##ptype, \
.expected_attach_type = atype, \
.cookie = (long)(flags), \
- .preload_fn = libbpf_preload_prog, \
+ .prog_prepare_load_fn = libbpf_prepare_prog_load, \
__VA_ARGS__ \
}
-static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie);
-static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie);
-static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie);
-static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie);
-static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie);
-static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie);
+static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static const struct bpf_sec_def section_defs[] = {
SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE | SEC_SLOPPY_PFX),
@@ -8612,6 +8645,8 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("uprobe/", KPROBE, 0, SEC_NONE),
SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
SEC_DEF("uretprobe/", KPROBE, 0, SEC_NONE),
+ SEC_DEF("kprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
+ SEC_DEF("kretprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE),
SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX | SEC_DEPRECATED),
SEC_DEF("action", SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX),
@@ -8682,61 +8717,167 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
};
-#define MAX_TYPE_NAME_SIZE 32
+static size_t custom_sec_def_cnt;
+static struct bpf_sec_def *custom_sec_defs;
+static struct bpf_sec_def custom_fallback_def;
+static bool has_custom_fallback_def;
-static const struct bpf_sec_def *find_sec_def(const char *sec_name)
+static int last_custom_sec_def_handler_id;
+
+int libbpf_register_prog_handler(const char *sec,
+ enum bpf_prog_type prog_type,
+ enum bpf_attach_type exp_attach_type,
+ const struct libbpf_prog_handler_opts *opts)
{
- const struct bpf_sec_def *sec_def;
- enum sec_def_flags sec_flags;
- int i, n = ARRAY_SIZE(section_defs), len;
- bool strict = libbpf_mode & LIBBPF_STRICT_SEC_NAME;
+ struct bpf_sec_def *sec_def;
- for (i = 0; i < n; i++) {
- sec_def = &section_defs[i];
- sec_flags = sec_def->cookie;
- len = strlen(sec_def->sec);
+ if (!OPTS_VALID(opts, libbpf_prog_handler_opts))
+ return libbpf_err(-EINVAL);
- /* "type/" always has to have proper SEC("type/extras") form */
- if (sec_def->sec[len - 1] == '/') {
- if (str_has_pfx(sec_name, sec_def->sec))
- return sec_def;
- continue;
- }
+ if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */
+ return libbpf_err(-E2BIG);
- /* "type+" means it can be either exact SEC("type") or
- * well-formed SEC("type/extras") with proper '/' separator
- */
- if (sec_def->sec[len - 1] == '+') {
- len--;
- /* not even a prefix */
- if (strncmp(sec_name, sec_def->sec, len) != 0)
- continue;
- /* exact match or has '/' separator */
- if (sec_name[len] == '\0' || sec_name[len] == '/')
- return sec_def;
- continue;
- }
+ if (sec) {
+ sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1,
+ sizeof(*sec_def));
+ if (!sec_def)
+ return libbpf_err(-ENOMEM);
- /* SEC_SLOPPY_PFX definitions are allowed to be just prefix
- * matches, unless strict section name mode
- * (LIBBPF_STRICT_SEC_NAME) is enabled, in which case the
- * match has to be exact.
- */
- if ((sec_flags & SEC_SLOPPY_PFX) && !strict) {
- if (str_has_pfx(sec_name, sec_def->sec))
- return sec_def;
- continue;
- }
+ custom_sec_defs = sec_def;
+ sec_def = &custom_sec_defs[custom_sec_def_cnt];
+ } else {
+ if (has_custom_fallback_def)
+ return libbpf_err(-EBUSY);
- /* Definitions not marked SEC_SLOPPY_PFX (e.g.,
- * SEC("syscall")) are exact matches in both modes.
- */
- if (strcmp(sec_name, sec_def->sec) == 0)
+ sec_def = &custom_fallback_def;
+ }
+
+ sec_def->sec = sec ? strdup(sec) : NULL;
+ if (sec && !sec_def->sec)
+ return libbpf_err(-ENOMEM);
+
+ sec_def->prog_type = prog_type;
+ sec_def->expected_attach_type = exp_attach_type;
+ sec_def->cookie = OPTS_GET(opts, cookie, 0);
+
+ sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL);
+ sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL);
+ sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL);
+
+ sec_def->handler_id = ++last_custom_sec_def_handler_id;
+
+ if (sec)
+ custom_sec_def_cnt++;
+ else
+ has_custom_fallback_def = true;
+
+ return sec_def->handler_id;
+}
+
+int libbpf_unregister_prog_handler(int handler_id)
+{
+ struct bpf_sec_def *sec_defs;
+ int i;
+
+ if (handler_id <= 0)
+ return libbpf_err(-EINVAL);
+
+ if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) {
+ memset(&custom_fallback_def, 0, sizeof(custom_fallback_def));
+ has_custom_fallback_def = false;
+ return 0;
+ }
+
+ for (i = 0; i < custom_sec_def_cnt; i++) {
+ if (custom_sec_defs[i].handler_id == handler_id)
+ break;
+ }
+
+ if (i == custom_sec_def_cnt)
+ return libbpf_err(-ENOENT);
+
+ free(custom_sec_defs[i].sec);
+ for (i = i + 1; i < custom_sec_def_cnt; i++)
+ custom_sec_defs[i - 1] = custom_sec_defs[i];
+ custom_sec_def_cnt--;
+
+ /* try to shrink the array, but it's ok if we couldn't */
+ sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
+ if (sec_defs)
+ custom_sec_defs = sec_defs;
+
+ return 0;
+}
+
+static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name,
+ bool allow_sloppy)
+{
+ size_t len = strlen(sec_def->sec);
+
+ /* "type/" always has to have proper SEC("type/extras") form */
+ if (sec_def->sec[len - 1] == '/') {
+ if (str_has_pfx(sec_name, sec_def->sec))
+ return true;
+ return false;
+ }
+
+ /* "type+" means it can be either exact SEC("type") or
+ * well-formed SEC("type/extras") with proper '/' separator
+ */
+ if (sec_def->sec[len - 1] == '+') {
+ len--;
+ /* not even a prefix */
+ if (strncmp(sec_name, sec_def->sec, len) != 0)
+ return false;
+ /* exact match or has '/' separator */
+ if (sec_name[len] == '\0' || sec_name[len] == '/')
+ return true;
+ return false;
+ }
+
+ /* SEC_SLOPPY_PFX definitions are allowed to be just prefix
+ * matches, unless strict section name mode
+ * (LIBBPF_STRICT_SEC_NAME) is enabled, in which case the
+ * match has to be exact.
+ */
+ if (allow_sloppy && str_has_pfx(sec_name, sec_def->sec))
+ return true;
+
+ /* Definitions not marked SEC_SLOPPY_PFX (e.g.,
+ * SEC("syscall")) are exact matches in both modes.
+ */
+ return strcmp(sec_name, sec_def->sec) == 0;
+}
+
+static const struct bpf_sec_def *find_sec_def(const char *sec_name)
+{
+ const struct bpf_sec_def *sec_def;
+ int i, n;
+ bool strict = libbpf_mode & LIBBPF_STRICT_SEC_NAME, allow_sloppy;
+
+ n = custom_sec_def_cnt;
+ for (i = 0; i < n; i++) {
+ sec_def = &custom_sec_defs[i];
+ if (sec_def_matches(sec_def, sec_name, false))
+ return sec_def;
+ }
+
+ n = ARRAY_SIZE(section_defs);
+ for (i = 0; i < n; i++) {
+ sec_def = &section_defs[i];
+ allow_sloppy = (sec_def->cookie & SEC_SLOPPY_PFX) && !strict;
+ if (sec_def_matches(sec_def, sec_name, allow_sloppy))
return sec_def;
}
+
+ if (has_custom_fallback_def)
+ return &custom_fallback_def;
+
return NULL;
}
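
To make the three matching rules above concrete, here is a small illustrative sketch; the "abc" definitions are hypothetical, not actual libbpf section definitions:

	/* Illustration only (hypothetical "abc" sec_def entries):
	 *   def.sec = "abc/"  : sec_def_matches(&def, "abc/something", false) -> true
	 *                       sec_def_matches(&def, "abc", false)           -> false
	 *   def.sec = "abc+"  : sec_def_matches(&def, "abc", false)           -> true
	 *                       sec_def_matches(&def, "abc/extra", false)     -> true
	 *                       sec_def_matches(&def, "abcdef", false)        -> false
	 *   def.sec = "abc"   : sec_def_matches(&def, "abcdef", true)         -> true  (sloppy prefix)
	 *                       sec_def_matches(&def, "abcdef", false)        -> false (exact match)
	 */
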
+#define MAX_TYPE_NAME_SIZE 32
+
static char *libbpf_get_type_names(bool attach_type)
{
int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
@@ -8752,7 +8893,7 @@ static char *libbpf_get_type_names(bool attach_type)
const struct bpf_sec_def *sec_def = &section_defs[i];
if (attach_type) {
- if (sec_def->preload_fn != libbpf_preload_prog)
+ if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
continue;
if (!(sec_def->cookie & SEC_ATTACHABLE))
@@ -9135,7 +9276,7 @@ int libbpf_attach_type_by_name(const char *name,
return libbpf_err(-EINVAL);
}
- if (sec_def->preload_fn != libbpf_preload_prog)
+ if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
return libbpf_err(-EINVAL);
if (!(sec_def->cookie & SEC_ATTACHABLE))
return libbpf_err(-EINVAL);
@@ -10109,14 +10250,146 @@ struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
}
-static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie)
+/* Adapted from perf/util/string.c */
+static bool glob_match(const char *str, const char *pat)
+{
+ while (*str && *pat && *pat != '*') {
+ if (*pat == '?') { /* Matches any single character */
+ str++;
+ pat++;
+ continue;
+ }
+ if (*str != *pat)
+ return false;
+ str++;
+ pat++;
+ }
+ /* Check wild card */
+ if (*pat == '*') {
+ while (*pat == '*')
+ pat++;
+ if (!*pat) /* Tail wild card matches all */
+ return true;
+ while (*str)
+ if (glob_match(str++, pat))
+ return true;
+ }
+ return !*str && !*pat;
+}
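
A few concrete results of the matcher above, for reference (inputs chosen to mirror the kprobe-multi selftests in this series):

	/*   glob_match("bpf_fentry_test1",  "bpf_fentry_test*") -> true
	 *   glob_match("bpf_fentry_test1",  "bpf_fentry_test?") -> true
	 *   glob_match("bpf_fentry_test10", "bpf_fentry_test?") -> false
	 *   glob_match("bpf_fentry_test1",  "*test2")           -> false
	 */
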
+
+struct kprobe_multi_resolve {
+ const char *pattern;
+ unsigned long *addrs;
+ size_t cap;
+ size_t cnt;
+};
+
+static int
+resolve_kprobe_multi_cb(unsigned long long sym_addr, char sym_type,
+ const char *sym_name, void *ctx)
+{
+ struct kprobe_multi_resolve *res = ctx;
+ int err;
+
+ if (!glob_match(sym_name, res->pattern))
+ return 0;
+
+ err = libbpf_ensure_mem((void **) &res->addrs, &res->cap, sizeof(unsigned long),
+ res->cnt + 1);
+ if (err)
+ return err;
+
+ res->addrs[res->cnt++] = (unsigned long) sym_addr;
+ return 0;
+}
+
+struct bpf_link *
+bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
+ const char *pattern,
+ const struct bpf_kprobe_multi_opts *opts)
+{
+ LIBBPF_OPTS(bpf_link_create_opts, lopts);
+ struct kprobe_multi_resolve res = {
+ .pattern = pattern,
+ };
+ struct bpf_link *link = NULL;
+ char errmsg[STRERR_BUFSIZE];
+ const unsigned long *addrs;
+ int err, link_fd, prog_fd;
+ const __u64 *cookies;
+ const char **syms;
+ bool retprobe;
+ size_t cnt;
+
+ if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ syms = OPTS_GET(opts, syms, false);
+ addrs = OPTS_GET(opts, addrs, false);
+ cnt = OPTS_GET(opts, cnt, false);
+ cookies = OPTS_GET(opts, cookies, false);
+
+ if (!pattern && !addrs && !syms)
+ return libbpf_err_ptr(-EINVAL);
+ if (pattern && (addrs || syms || cookies || cnt))
+ return libbpf_err_ptr(-EINVAL);
+ if (!pattern && !cnt)
+ return libbpf_err_ptr(-EINVAL);
+ if (addrs && syms)
+ return libbpf_err_ptr(-EINVAL);
+
+ if (pattern) {
+ err = libbpf_kallsyms_parse(resolve_kprobe_multi_cb, &res);
+ if (err)
+ goto error;
+ if (!res.cnt) {
+ err = -ENOENT;
+ goto error;
+ }
+ addrs = res.addrs;
+ cnt = res.cnt;
+ }
+
+ retprobe = OPTS_GET(opts, retprobe, false);
+
+ lopts.kprobe_multi.syms = syms;
+ lopts.kprobe_multi.addrs = addrs;
+ lopts.kprobe_multi.cookies = cookies;
+ lopts.kprobe_multi.cnt = cnt;
+ lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0;
+
+ link = calloc(1, sizeof(*link));
+ if (!link) {
+ err = -ENOMEM;
+ goto error;
+ }
+ link->detach = &bpf_link__detach_fd;
+
+ prog_fd = bpf_program__fd(prog);
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts);
+ if (link_fd < 0) {
+ err = -errno;
+ pr_warn("prog '%s': failed to attach: %s\n",
+ prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ goto error;
+ }
+ link->fd = link_fd;
+ free(res.addrs);
+ return link;
+
+error:
+ free(link);
+ free(res.addrs);
+ return libbpf_err_ptr(err);
+}
+
+static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
unsigned long offset = 0;
- struct bpf_link *link;
const char *func_name;
char *func;
- int n, err;
+ int n;
opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
if (opts.retprobe)
@@ -10126,21 +10399,43 @@ static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cooki
n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
if (n < 1) {
- err = -EINVAL;
pr_warn("kprobe name is invalid: %s\n", func_name);
- return libbpf_err_ptr(err);
+ return -EINVAL;
}
if (opts.retprobe && offset != 0) {
free(func);
- err = -EINVAL;
pr_warn("kretprobes do not support offset specification\n");
- return libbpf_err_ptr(err);
+ return -EINVAL;
}
opts.offset = offset;
- link = bpf_program__attach_kprobe_opts(prog, func, &opts);
+ *link = bpf_program__attach_kprobe_opts(prog, func, &opts);
free(func);
- return link;
+ return libbpf_get_error(*link);
+}
+
+static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ const char *spec;
+ char *pattern;
+ int n;
+
+ opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
+ if (opts.retprobe)
+ spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
+ else
+ spec = prog->sec_name + sizeof("kprobe.multi/") - 1;
+
+ n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
+ if (n < 1) {
+		pr_warn("kprobe multi pattern is invalid: %s\n", spec);
+ return -EINVAL;
+ }
+
+ *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
+ free(pattern);
+ return libbpf_get_error(*link);
}
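
For reference, a minimal BPF-side sketch of a program using the new section type; the function and pattern names are illustrative and follow the kprobe_multi selftest added later in this series:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	char _license[] SEC("license") = "GPL";

	/* the glob after "kprobe.multi/" is resolved against kallsyms with
	 * glob_match() and all matching functions are attached via one link */
	SEC("kprobe.multi/bpf_fentry_test*")
	int test_kprobe_many(struct pt_regs *ctx)
	{
		return 0;
	}
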
static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
@@ -10395,14 +10690,13 @@ struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
}
-static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie)
+static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
char *sec_name, *tp_cat, *tp_name;
- struct bpf_link *link;
sec_name = strdup(prog->sec_name);
if (!sec_name)
- return libbpf_err_ptr(-ENOMEM);
+ return -ENOMEM;
/* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
if (str_has_pfx(prog->sec_name, "tp/"))
@@ -10412,14 +10706,14 @@ static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie)
tp_name = strchr(tp_cat, '/');
if (!tp_name) {
free(sec_name);
- return libbpf_err_ptr(-EINVAL);
+ return -EINVAL;
}
*tp_name = '\0';
tp_name++;
- link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
+ *link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
free(sec_name);
- return link;
+ return libbpf_get_error(*link);
}
struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
@@ -10452,7 +10746,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *pr
return link;
}
-static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie)
+static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
static const char *const prefixes[] = {
"raw_tp/",
@@ -10472,10 +10766,11 @@ static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cooki
if (!tp_name) {
pr_warn("prog '%s': invalid section name '%s'\n",
prog->name, prog->sec_name);
- return libbpf_err_ptr(-EINVAL);
+ return -EINVAL;
}
- return bpf_program__attach_raw_tracepoint(prog, tp_name);
+ *link = bpf_program__attach_raw_tracepoint(prog, tp_name);
+	return libbpf_get_error(*link);
}
/* Common logic for all BPF program types that attach to a btf_id */
@@ -10518,14 +10813,16 @@ struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
return bpf_program__attach_btf_id(prog);
}
-static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie)
+static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
- return bpf_program__attach_trace(prog);
+ *link = bpf_program__attach_trace(prog);
+ return libbpf_get_error(*link);
}
-static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie)
+static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
- return bpf_program__attach_lsm(prog);
+ *link = bpf_program__attach_lsm(prog);
+ return libbpf_get_error(*link);
}
static struct bpf_link *
@@ -10654,17 +10951,33 @@ bpf_program__attach_iter(const struct bpf_program *prog,
return link;
}
-static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie)
+static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
- return bpf_program__attach_iter(prog, NULL);
+ *link = bpf_program__attach_iter(prog, NULL);
+ return libbpf_get_error(*link);
}
struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
{
- if (!prog->sec_def || !prog->sec_def->attach_fn)
- return libbpf_err_ptr(-ESRCH);
+ struct bpf_link *link = NULL;
+ int err;
+
+ if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
+ return libbpf_err_ptr(-EOPNOTSUPP);
+
+ err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
+ if (err)
+ return libbpf_err_ptr(err);
- return prog->sec_def->attach_fn(prog, prog->sec_def->cookie);
+ /* When calling bpf_program__attach() explicitly, auto-attach support
+ * is expected to work, so NULL returned link is considered an error.
+ * This is different for skeleton's attach, see comment in
+ * bpf_object__attach_skeleton().
+ */
+ if (!link)
+ return libbpf_err_ptr(-EOPNOTSUPP);
+
+ return link;
}
static int bpf_link__detach_struct_ops(struct bpf_link *link)
@@ -11679,6 +11992,49 @@ int libbpf_num_possible_cpus(void)
return tmp_cpus;
}
+static int populate_skeleton_maps(const struct bpf_object *obj,
+ struct bpf_map_skeleton *maps,
+ size_t map_cnt)
+{
+ int i;
+
+ for (i = 0; i < map_cnt; i++) {
+ struct bpf_map **map = maps[i].map;
+ const char *name = maps[i].name;
+ void **mmaped = maps[i].mmaped;
+
+ *map = bpf_object__find_map_by_name(obj, name);
+ if (!*map) {
+ pr_warn("failed to find skeleton map '%s'\n", name);
+ return -ESRCH;
+ }
+
+ /* externs shouldn't be pre-setup from user code */
+ if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
+ *mmaped = (*map)->mmaped;
+ }
+ return 0;
+}
+
+static int populate_skeleton_progs(const struct bpf_object *obj,
+ struct bpf_prog_skeleton *progs,
+ size_t prog_cnt)
+{
+ int i;
+
+ for (i = 0; i < prog_cnt; i++) {
+ struct bpf_program **prog = progs[i].prog;
+ const char *name = progs[i].name;
+
+ *prog = bpf_object__find_program_by_name(obj, name);
+ if (!*prog) {
+ pr_warn("failed to find skeleton program '%s'\n", name);
+ return -ESRCH;
+ }
+ }
+ return 0;
+}
+
int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
const struct bpf_object_open_opts *opts)
{
@@ -11686,7 +12042,7 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
.object_name = s->name,
);
struct bpf_object *obj;
- int i, err;
+ int err;
/* Attempt to preserve opts->object_name, unless overriden by user
* explicitly. Overwriting object name for skeletons is discouraged,
@@ -11709,37 +12065,91 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
}
*s->obj = obj;
+ err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
+ if (err) {
+ pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
+ return libbpf_err(err);
+ }
- for (i = 0; i < s->map_cnt; i++) {
- struct bpf_map **map = s->maps[i].map;
- const char *name = s->maps[i].name;
- void **mmaped = s->maps[i].mmaped;
+ err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
+ if (err) {
+ pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
+ return libbpf_err(err);
+ }
- *map = bpf_object__find_map_by_name(obj, name);
- if (!*map) {
- pr_warn("failed to find skeleton map '%s'\n", name);
- return libbpf_err(-ESRCH);
- }
+ return 0;
+}
- /* externs shouldn't be pre-setup from user code */
- if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
- *mmaped = (*map)->mmaped;
+int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
+{
+ int err, len, var_idx, i;
+ const char *var_name;
+ const struct bpf_map *map;
+ struct btf *btf;
+ __u32 map_type_id;
+ const struct btf_type *map_type, *var_type;
+ const struct bpf_var_skeleton *var_skel;
+ struct btf_var_secinfo *var;
+
+ if (!s->obj)
+ return libbpf_err(-EINVAL);
+
+ btf = bpf_object__btf(s->obj);
+ if (!btf) {
+ pr_warn("subskeletons require BTF at runtime (object %s)\n",
+ bpf_object__name(s->obj));
+ return libbpf_err(-errno);
}
- for (i = 0; i < s->prog_cnt; i++) {
- struct bpf_program **prog = s->progs[i].prog;
- const char *name = s->progs[i].name;
+ err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
+ if (err) {
+ pr_warn("failed to populate subskeleton maps: %d\n", err);
+ return libbpf_err(err);
+ }
- *prog = bpf_object__find_program_by_name(obj, name);
- if (!*prog) {
- pr_warn("failed to find skeleton program '%s'\n", name);
- return libbpf_err(-ESRCH);
- }
+ err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
+ if (err) {
+		pr_warn("failed to populate subskeleton progs: %d\n", err);
+ return libbpf_err(err);
}
+ for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
+ var_skel = &s->vars[var_idx];
+ map = *var_skel->map;
+ map_type_id = bpf_map__btf_value_type_id(map);
+ map_type = btf__type_by_id(btf, map_type_id);
+
+ if (!btf_is_datasec(map_type)) {
+ pr_warn("type for map '%1$s' is not a datasec: %2$s",
+ bpf_map__name(map),
+ __btf_kind_str(btf_kind(map_type)));
+ return libbpf_err(-EINVAL);
+ }
+
+ len = btf_vlen(map_type);
+ var = btf_var_secinfos(map_type);
+ for (i = 0; i < len; i++, var++) {
+ var_type = btf__type_by_id(btf, var->type);
+ var_name = btf__name_by_offset(btf, var_type->name_off);
+ if (strcmp(var_name, var_skel->name) == 0) {
+ *var_skel->addr = map->mmaped + var->offset;
+ break;
+ }
+ }
+ }
return 0;
}
+void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
+{
+ if (!s)
+ return;
+ free(s->maps);
+ free(s->progs);
+ free(s->vars);
+ free(s);
+}
+
int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
int i, err;
@@ -11805,16 +12215,30 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
continue;
/* auto-attaching not supported for this program */
- if (!prog->sec_def || !prog->sec_def->attach_fn)
+ if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
continue;
- *link = bpf_program__attach(prog);
- err = libbpf_get_error(*link);
+ /* if user already set the link manually, don't attempt auto-attach */
+ if (*link)
+ continue;
+
+ err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
if (err) {
- pr_warn("failed to auto-attach program '%s': %d\n",
+ pr_warn("prog '%s': failed to auto-attach: %d\n",
bpf_program__name(prog), err);
return libbpf_err(err);
}
+
+ /* It's possible that for some SEC() definitions auto-attach
+ * is supported in some cases (e.g., if definition completely
+ * specifies target information), but is not in other cases.
+ * SEC("uprobe") is one such case. If user specified target
+ * binary and function name, such BPF program can be
+ * auto-attached. But if not, it shouldn't trigger skeleton's
+ * attach to fail. It should just be skipped.
+	 * The auto-attach handler signals such a case by returning 0 (no
+	 * error) and setting link to NULL.
+ */
}
return 0;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index c8d8daad212e..05dde85e19a6 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -425,6 +425,29 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
const char *func_name,
const struct bpf_kprobe_opts *opts);
+struct bpf_kprobe_multi_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* array of function symbols to attach */
+ const char **syms;
+ /* array of function addresses to attach */
+ const unsigned long *addrs;
+ /* array of user-provided values fetchable through bpf_get_attach_cookie */
+ const __u64 *cookies;
+ /* number of elements in syms/addrs/cookies arrays */
+ size_t cnt;
+ /* create return kprobes */
+ bool retprobe;
+ size_t :0;
+};
+
+#define bpf_kprobe_multi_opts__last_field retprobe
+
+LIBBPF_API struct bpf_link *
+bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
+ const char *pattern,
+ const struct bpf_kprobe_multi_opts *opts);
+
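
A minimal user-space sketch of the two ways to use this API; the symbol names mirror the kprobe_multi selftests and are purely illustrative, and error handling is trimmed:

	static void attach_examples(struct bpf_program *prog)
	{
		const char *syms[] = { "bpf_fentry_test1", "bpf_fentry_test2" };
		__u64 cookies[] = { 1, 2 };
		LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
			.syms = syms,
			.cookies = cookies,
			.cnt = 2,
		);
		struct bpf_link *l1, *l2;

		/* 1) glob pattern resolved against kallsyms; syms/addrs/cnt
		 *    must not be set in this mode */
		l1 = bpf_program__attach_kprobe_multi_opts(prog, "bpf_fentry_test*", NULL);

		/* 2) explicit symbol list with per-symbol values retrievable
		 *    through bpf_get_attach_cookie() */
		l2 = bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);

		bpf_link__destroy(l2);
		bpf_link__destroy(l1);
	}
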
struct bpf_uprobe_opts {
/* size of this struct, for forward/backward compatiblity */
size_t sz;
@@ -1289,6 +1312,35 @@ LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s);
+struct bpf_var_skeleton {
+ const char *name;
+ struct bpf_map **map;
+ void **addr;
+};
+
+struct bpf_object_subskeleton {
+ size_t sz; /* size of this struct, for forward/backward compatibility */
+
+ const struct bpf_object *obj;
+
+ int map_cnt;
+ int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */
+ struct bpf_map_skeleton *maps;
+
+ int prog_cnt;
+ int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */
+ struct bpf_prog_skeleton *progs;
+
+ int var_cnt;
+ int var_skel_sz; /* sizeof(struct bpf_var_skeleton) */
+ struct bpf_var_skeleton *vars;
+};
+
+LIBBPF_API int
+bpf_object__open_subskeleton(struct bpf_object_subskeleton *s);
+LIBBPF_API void
+bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s);
+
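
The intended consumer of these structs is code generated by "bpftool gen subskeleton". A hedged sketch of how such a subskeleton is typically used; the "app"/"lib" type names and the "shared_counter" variable are hypothetical placeholders for whatever bpftool emits for a particular object:

	/* "app" is the main skeleton, "lib" the subskeleton generated from a
	 * shared library object linked into it */
	struct app *skel = app__open_and_load();
	struct lib *lib = lib__open(skel->obj);	/* calls bpf_object__open_subskeleton() */

	if (!lib)
		goto out;

	/* subskeleton variables are exposed as pointers, resolved through
	 * struct bpf_var_skeleton when the subskeleton is opened */
	*lib->bss->shared_counter += 1;

	lib__destroy(lib);			/* calls bpf_object__destroy_subskeleton() */
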
struct gen_loader_opts {
size_t sz; /* size of this struct, for forward/backward compatiblity */
const char *data;
@@ -1328,6 +1380,115 @@ LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker,
LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);
+/*
+ * Custom handling of BPF program's SEC() definitions
+ */
+
+struct bpf_prog_load_opts; /* defined in bpf.h */
+
+/* Called during bpf_object__open() for each recognized BPF program. Callback
+ * can use various bpf_program__set_*() setters to adjust whatever properties
+ * are necessary.
+ */
+typedef int (*libbpf_prog_setup_fn_t)(struct bpf_program *prog, long cookie);
+
+/* Called right before libbpf performs bpf_prog_load() to load BPF program
+ * into the kernel. Callback can adjust opts as necessary.
+ */
+typedef int (*libbpf_prog_prepare_load_fn_t)(struct bpf_program *prog,
+ struct bpf_prog_load_opts *opts, long cookie);
+
+/* Called during skeleton attach or through bpf_program__attach(). If
+ * auto-attach is not supported, callback should return 0 and set link to
+ * NULL (it's not considered an error during skeleton attach, but it will be
+ * an error for bpf_program__attach() calls). On error, error should be
+ * returned directly and link set to NULL. On success, return 0 and set link
+ * to a valid struct bpf_link.
+ */
+typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cookie,
+ struct bpf_link **link);
+
+struct libbpf_prog_handler_opts {
+	/* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* User-provided value that is passed to prog_setup_fn,
+ * prog_prepare_load_fn, and prog_attach_fn callbacks. Allows user to
+ * register one set of callbacks for multiple SEC() definitions and
+ * still be able to distinguish them, if necessary. For example,
+ * libbpf itself is using this to pass necessary flags (e.g.,
+ * sleepable flag) to a common internal SEC() handler.
+ */
+ long cookie;
+ /* BPF program initialization callback (see libbpf_prog_setup_fn_t).
+ * Callback is optional, pass NULL if it's not necessary.
+ */
+ libbpf_prog_setup_fn_t prog_setup_fn;
+ /* BPF program loading callback (see libbpf_prog_prepare_load_fn_t).
+ * Callback is optional, pass NULL if it's not necessary.
+ */
+ libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
+ /* BPF program attach callback (see libbpf_prog_attach_fn_t).
+ * Callback is optional, pass NULL if it's not necessary.
+ */
+ libbpf_prog_attach_fn_t prog_attach_fn;
+};
+#define libbpf_prog_handler_opts__last_field prog_attach_fn
+
+/**
+ * @brief **libbpf_register_prog_handler()** registers a custom BPF program
+ * SEC() handler.
+ * @param sec section prefix for which custom handler is registered
+ * @param prog_type BPF program type associated with specified section
+ * @param exp_attach_type Expected BPF attach type associated with specified section
+ * @param opts optional cookie, callbacks, and other extra options
+ * @return Non-negative handler ID is returned on success. This handler ID has
+ * to be passed to *libbpf_unregister_prog_handler()* to unregister such
+ * custom handler. Negative error code is returned on error.
+ *
+ * *sec* defines which SEC() definitions are handled by this custom handler
+ * registration. *sec* can have a few different forms:
+ * - if *sec* is just a plain string (e.g., "abc"), it will match only
+ * SEC("abc"). If BPF program specifies SEC("abc/whatever") it will result
+ * in an error;
+ * - if *sec* is of the form "abc/", proper SEC() form is
+ * SEC("abc/something"), where acceptable "something" should be checked by
+ *   *prog_setup_fn* callback, if there are additional restrictions;
+ * - if *sec* is of the form "abc+", it will successfully match both
+ * SEC("abc") and SEC("abc/whatever") forms;
+ * - if *sec* is NULL, custom handler is registered for any BPF program that
+ * doesn't match any of the registered (custom or libbpf's own) SEC()
+ * handlers. There could be only one such generic custom handler registered
+ * at any given time.
+ *
+ * All custom handlers (except the one with *sec* == NULL) are processed
+ * before libbpf's own SEC() handlers. It is allowed to "override" libbpf's
+ * SEC() handlers by registering custom ones for the same section prefix
+ * (i.e., it's possible to have custom SEC("perf_event/LLC-load-misses")
+ * handler).
+ *
+ * Note, like most other global libbpf APIs (e.g., libbpf_set_print(),
+ * libbpf_set_strict_mode(), etc.), these APIs are not thread-safe. User needs
+ * to ensure synchronization if there is a risk of running this API from
+ * multiple threads simultaneously.
+ */
+LIBBPF_API int libbpf_register_prog_handler(const char *sec,
+ enum bpf_prog_type prog_type,
+ enum bpf_attach_type exp_attach_type,
+ const struct libbpf_prog_handler_opts *opts);
+/**
+ * @brief *libbpf_unregister_prog_handler()* unregisters previously registered
+ * custom BPF program SEC() handler.
+ * @param handler_id handler ID returned by *libbpf_register_prog_handler()*
+ * after successful registration
+ * @return 0 on success, negative error code if handler isn't found
+ *
+ * Note, like most other global libbpf APIs (e.g., libbpf_set_print(),
+ * libbpf_set_strict_mode(), etc.), these APIs are not thread-safe. User needs
+ * to ensure synchronization if there is a risk of running this API from
+ * multiple threads simultaneously.
+ */
+LIBBPF_API int libbpf_unregister_prog_handler(int handler_id);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
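
To tie the handler API above together, a minimal hedged sketch of registering a custom SEC() handler; the "myprog" section name, the raw-tracepoint program type, and the callback are illustrative choices, not prescribed by the API:

	#include <bpf/libbpf.h>

	static int my_attach(const struct bpf_program *prog, long cookie,
			     struct bpf_link **link)
	{
		/* Auto-attach is not supported for this custom section: per the
		 * libbpf_prog_attach_fn_t contract, return 0 and leave *link
		 * NULL so skeleton attach skips the program instead of failing.
		 */
		*link = NULL;
		return 0;
	}

	static int register_my_handler(void)
	{
		LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
			.prog_attach_fn = my_attach,
		);

		/* "myprog+" matches both SEC("myprog") and SEC("myprog/whatever");
		 * returns a handler ID to pass to libbpf_unregister_prog_handler() */
		return libbpf_register_prog_handler("myprog+", BPF_PROG_TYPE_RAW_TRACEPOINT,
						    0, &opts);
	}
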
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 47e70c9058d9..dd35ee58bfaa 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -439,3 +439,12 @@ LIBBPF_0.7.0 {
libbpf_probe_bpf_prog_type;
libbpf_set_memlock_rlim_max;
} LIBBPF_0.6.0;
+
+LIBBPF_0.8.0 {
+ global:
+ bpf_object__destroy_subskeleton;
+ bpf_object__open_subskeleton;
+ libbpf_register_prog_handler;
+ libbpf_unregister_prog_handler;
+ bpf_program__attach_kprobe_multi_opts;
+} LIBBPF_0.7.0;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 4fda8bdf0a0d..b6247dc7f8eb 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -449,6 +449,11 @@ __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
extern enum libbpf_strict_mode libbpf_mode;
+typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type,
+ const char *sym_name, void *ctx);
+
+int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *arg);
+
/* handle direct returned errors */
static inline int libbpf_err(int ret)
{
diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h
index a283cf031665..d7bcbd01f66f 100644
--- a/tools/lib/bpf/libbpf_legacy.h
+++ b/tools/lib/bpf/libbpf_legacy.h
@@ -54,6 +54,10 @@ enum libbpf_strict_mode {
*
* Note, in this mode the program pin path will be based on the
* function name instead of section name.
+ *
+ * Additionally, routines in the .text section are always considered
+ * sub-programs. Legacy behavior allows for a single routine in .text
+ * to be a program.
*/
LIBBPF_STRICT_SEC_NAME = 0x04,
/*
diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h
index 0fefefc3500b..61f2039404b6 100644
--- a/tools/lib/bpf/libbpf_version.h
+++ b/tools/lib/bpf/libbpf_version.h
@@ -4,6 +4,6 @@
#define __LIBBPF_VERSION_H
#define LIBBPF_MAJOR_VERSION 0
-#define LIBBPF_MINOR_VERSION 7
+#define LIBBPF_MINOR_VERSION 8
#endif /* __LIBBPF_VERSION_H */
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index edafe56664f3..af136f73b09d 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -481,8 +481,8 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
BPF_EMIT_CALL(BPF_FUNC_redirect_map),
BPF_EXIT_INSN(),
};
- size_t insns_cnt[] = {sizeof(prog) / sizeof(struct bpf_insn),
- sizeof(prog_redirect_flags) / sizeof(struct bpf_insn),
+ size_t insns_cnt[] = {ARRAY_SIZE(prog),
+ ARRAY_SIZE(prog_redirect_flags),
};
struct bpf_insn *progs[] = {prog, prog_redirect_flags};
enum xsk_prog option = get_xsk_prog();
@@ -1193,12 +1193,23 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
int xsk_umem__delete(struct xsk_umem *umem)
{
+ struct xdp_mmap_offsets off;
+ int err;
+
if (!umem)
return 0;
if (umem->refcount)
return -EBUSY;
+ err = xsk_get_mmap_offsets(umem->fd, &off);
+ if (!err && umem->fill_save && umem->comp_save) {
+ munmap(umem->fill_save->ring - off.fr.desc,
+ off.fr.desc + umem->config.fill_size * sizeof(__u64));
+ munmap(umem->comp_save->ring - off.cr.desc,
+ off.cr.desc + umem->config.comp_size * sizeof(__u64));
+ }
+
close(umem->fd);
free(umem);
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 79d102304470..a2335e402145 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -89,6 +89,9 @@ ifeq ($(CC_NO_CLANG), 1)
EXTRA_WARNINGS += -Wstrict-aliasing=3
else ifneq ($(CROSS_COMPILE),)
+# Allow userspace to override CLANG_CROSS_FLAGS to specify their own
+# sysroots and flags or to avoid the GCC call in pure Clang builds.
+ifeq ($(CLANG_CROSS_FLAGS),)
CLANG_CROSS_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)gcc 2>/dev/null))
ifneq ($(GCC_TOOLCHAIN_DIR),)
@@ -96,6 +99,7 @@ CLANG_CROSS_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
CLANG_CROSS_FLAGS += --sysroot=$(shell $(CROSS_COMPILE)gcc -print-sysroot)
CLANG_CROSS_FLAGS += --gcc-toolchain=$(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif # GCC_TOOLCHAIN_DIR
+endif # CLANG_CROSS_FLAGS
CFLAGS += $(CLANG_CROSS_FLAGS)
AFLAGS += $(CLANG_CROSS_FLAGS)
endif # CROSS_COMPILE
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index a7eead8820a0..595565eb68c0 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -31,6 +31,7 @@ test_tcp_check_syncookie_user
test_sysctl
xdping
test_cpp
+*.subskel.h
*.skel.h
*.lskel.h
/no_alu32
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index fe12b4f5fe20..3820608faf57 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -25,7 +25,7 @@ CFLAGS += -g -O0 -rdynamic -Wall -Werror $(GENFLAGS) $(SAN_CFLAGS) \
-I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
-I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT)
LDFLAGS += $(SAN_CFLAGS)
-LDLIBS += -lcap -lelf -lz -lrt -lpthread
+LDLIBS += -lelf -lz -lrt -lpthread
# Silence some warnings when compiled with clang
ifneq ($(LLVM),)
@@ -195,6 +195,7 @@ $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ)
CGROUP_HELPERS := $(OUTPUT)/cgroup_helpers.o
TESTING_HELPERS := $(OUTPUT)/testing_helpers.o
TRACE_HELPERS := $(OUTPUT)/trace_helpers.o
+CAP_HELPERS := $(OUTPUT)/cap_helpers.o
$(OUTPUT)/test_dev_cgroup: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_skb_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS)
@@ -211,7 +212,7 @@ $(OUTPUT)/test_lirc_mode2_user: $(TESTING_HELPERS)
$(OUTPUT)/xdping: $(TESTING_HELPERS)
$(OUTPUT)/flow_dissector_load: $(TESTING_HELPERS)
$(OUTPUT)/test_maps: $(TESTING_HELPERS)
-$(OUTPUT)/test_verifier: $(TESTING_HELPERS)
+$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS)
BPFTOOL ?= $(DEFAULT_BPFTOOL)
$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
@@ -326,7 +327,13 @@ endef
SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
- linked_vars.skel.h linked_maps.skel.h
+ linked_vars.skel.h linked_maps.skel.h \
+ test_subskeleton.skel.h test_subskeleton_lib.skel.h
+
+# In the subskeleton case, we want the test_subskeleton_lib.subskel.h file
+# but that's created as a side-effect of the skel.h generation.
+test_subskeleton.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o test_subskeleton.o
+test_subskeleton_lib.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o
LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \
@@ -404,6 +411,7 @@ $(TRUNNER_BPF_SKELS): %.skel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)$$(BPFTOOL) gen object $$(<:.o=.linked3.o) $$(<:.o=.linked2.o)
$(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
$(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@
+ $(Q)$$(BPFTOOL) gen subskeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$(@:.skel.h=.subskel.h)
$(TRUNNER_BPF_LSKELS): %.lskel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
@@ -421,6 +429,7 @@ $(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)diff $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked3.o)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
$(Q)$$(BPFTOOL) gen skeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$@
+ $(Q)$$(BPFTOOL) gen subskeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$(@:.skel.h=.subskel.h)
endif
# ensure we set up tests.h header generation rule just once
@@ -479,7 +488,8 @@ TRUNNER_TESTS_DIR := prog_tests
TRUNNER_BPF_PROGS_DIR := progs
TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \
network_helpers.c testing_helpers.c \
- btf_helpers.c flow_dissector_load.h
+ btf_helpers.c flow_dissector_load.h \
+ cap_helpers.c
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
ima_setup.sh \
$(wildcard progs/btf_dump_test_case_*.c)
@@ -557,6 +567,6 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature bpftool \
- $(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h no_alu32 bpf_gcc bpf_testmod.ko)
+ $(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h no_alu32 bpf_gcc bpf_testmod.ko)
.PHONY: docs docs-clean
diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst
index d099d91adc3b..eb1b7541f39d 100644
--- a/tools/testing/selftests/bpf/README.rst
+++ b/tools/testing/selftests/bpf/README.rst
@@ -32,11 +32,19 @@ For more information on about using the script, run:
$ tools/testing/selftests/bpf/vmtest.sh -h
+In case of linker errors when running selftests, try using static linking:
+
+.. code-block:: console
+
+ $ LDLIBS=-static vmtest.sh
+
+.. note:: Some distros may not support static linking.
+
.. note:: The script uses pahole and clang based on host environment setting.
If you want to change pahole and llvm, you can change `PATH` environment
variable in the beginning of script.
-.. note:: The script currently only supports x86_64.
+.. note:: The script currently only supports x86_64 and s390x architectures.
Additional information about selftest failures are
documented here.
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 27d63be47b95..e585e1cefc77 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -33,6 +33,10 @@ struct bpf_testmod_btf_type_tag_2 {
struct bpf_testmod_btf_type_tag_1 __user *p;
};
+struct bpf_testmod_btf_type_tag_3 {
+ struct bpf_testmod_btf_type_tag_1 __percpu *p;
+};
+
noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
BTF_TYPE_EMIT(func_proto_typedef);
@@ -46,6 +50,16 @@ bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
return arg->p->a;
}
+noinline int
+bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
+ return arg->a;
+}
+
+noinline int
+bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
+ return arg->p->a;
+}
+
noinline int bpf_testmod_loop_test(int n)
{
int i, sum = 0;
diff --git a/tools/testing/selftests/bpf/cap_helpers.c b/tools/testing/selftests/bpf/cap_helpers.c
new file mode 100644
index 000000000000..d5ac507401d7
--- /dev/null
+++ b/tools/testing/selftests/bpf/cap_helpers.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "cap_helpers.h"
+
+/* Avoid a dependency on <sys/capability.h> from the libcap-devel package:
+ * declare the capget()/capset() prototypes directly here and rely on the
+ * glibc-provided syscall wrappers.
+ */
+int capget(cap_user_header_t header, cap_user_data_t data);
+int capset(cap_user_header_t header, const cap_user_data_t data);
+
+int cap_enable_effective(__u64 caps, __u64 *old_caps)
+{
+ struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
+ struct __user_cap_header_struct hdr = {
+ .version = _LINUX_CAPABILITY_VERSION_3,
+ };
+ __u32 cap0 = caps;
+ __u32 cap1 = caps >> 32;
+ int err;
+
+ err = capget(&hdr, data);
+ if (err)
+ return err;
+
+ if (old_caps)
+ *old_caps = (__u64)(data[1].effective) << 32 | data[0].effective;
+
+ if ((data[0].effective & cap0) == cap0 &&
+ (data[1].effective & cap1) == cap1)
+ return 0;
+
+ data[0].effective |= cap0;
+ data[1].effective |= cap1;
+ err = capset(&hdr, data);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int cap_disable_effective(__u64 caps, __u64 *old_caps)
+{
+ struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
+ struct __user_cap_header_struct hdr = {
+ .version = _LINUX_CAPABILITY_VERSION_3,
+ };
+ __u32 cap0 = caps;
+ __u32 cap1 = caps >> 32;
+ int err;
+
+ err = capget(&hdr, data);
+ if (err)
+ return err;
+
+ if (old_caps)
+ *old_caps = (__u64)(data[1].effective) << 32 | data[0].effective;
+
+ if (!(data[0].effective & cap0) && !(data[1].effective & cap1))
+ return 0;
+
+ data[0].effective &= ~cap0;
+ data[1].effective &= ~cap1;
+ err = capset(&hdr, data);
+ if (err)
+ return err;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/cap_helpers.h b/tools/testing/selftests/bpf/cap_helpers.h
new file mode 100644
index 000000000000..6d163530cb0f
--- /dev/null
+++ b/tools/testing/selftests/bpf/cap_helpers.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __CAP_HELPERS_H
+#define __CAP_HELPERS_H
+
+#include <linux/types.h>
+#include <linux/capability.h>
+
+#ifndef CAP_PERFMON
+#define CAP_PERFMON 38
+#endif
+
+#ifndef CAP_BPF
+#define CAP_BPF 39
+#endif
+
+int cap_enable_effective(__u64 caps, __u64 *old_caps);
+int cap_disable_effective(__u64 caps, __u64 *old_caps);
+
+#endif
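
A short usage sketch for these helpers, mirroring what the bind_perm test below does (purely illustrative; error handling abbreviated):

	__u64 old_caps = 0;

	/* drop CAP_NET_BIND_SERVICE for the current thread, remembering the
	 * previous effective capability set */
	if (cap_disable_effective(1ULL << CAP_NET_BIND_SERVICE, &old_caps))
		return;	/* handle error */

	/* ... run the privileged-port bind attempts ... */

	/* restore the capability only if it was effective before */
	if (old_caps & (1ULL << CAP_NET_BIND_SERVICE))
		cap_enable_effective(1ULL << CAP_NET_BIND_SERVICE, NULL);
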
diff --git a/tools/testing/selftests/bpf/ima_setup.sh b/tools/testing/selftests/bpf/ima_setup.sh
index 8e62581113a3..8ecead4ccad0 100755
--- a/tools/testing/selftests/bpf/ima_setup.sh
+++ b/tools/testing/selftests/bpf/ima_setup.sh
@@ -12,7 +12,7 @@ LOG_FILE="$(mktemp /tmp/ima_setup.XXXX.log)"
usage()
{
- echo "Usage: $0 <setup|cleanup|run> <existing_tmp_dir>"
+ echo "Usage: $0 <setup|cleanup|run|modify-bin|restore-bin|load-policy> <existing_tmp_dir>"
exit 1
}
@@ -51,6 +51,7 @@ setup()
ensure_mount_securityfs
echo "measure func=BPRM_CHECK fsuuid=${mount_uuid}" > ${IMA_POLICY_FILE}
+ echo "measure func=BPRM_CHECK fsuuid=${mount_uuid}" > ${mount_dir}/policy_test
}
cleanup() {
@@ -77,6 +78,32 @@ run()
exec "${copied_bin_path}"
}
+modify_bin()
+{
+ local tmp_dir="$1"
+ local mount_dir="${tmp_dir}/mnt"
+ local copied_bin_path="${mount_dir}/$(basename ${TEST_BINARY})"
+
+ echo "mod" >> "${copied_bin_path}"
+}
+
+restore_bin()
+{
+ local tmp_dir="$1"
+ local mount_dir="${tmp_dir}/mnt"
+ local copied_bin_path="${mount_dir}/$(basename ${TEST_BINARY})"
+
+ truncate -s -4 "${copied_bin_path}"
+}
+
+load_policy()
+{
+ local tmp_dir="$1"
+ local mount_dir="${tmp_dir}/mnt"
+
+ echo ${mount_dir}/policy_test > ${IMA_POLICY_FILE} 2> /dev/null
+}
+
catch()
{
local exit_code="$1"
@@ -105,6 +132,12 @@ main()
cleanup "${tmp_dir}"
elif [[ "${action}" == "run" ]]; then
run "${tmp_dir}"
+ elif [[ "${action}" == "modify-bin" ]]; then
+ modify_bin "${tmp_dir}"
+ elif [[ "${action}" == "restore-bin" ]]; then
+ restore_bin "${tmp_dir}"
+ elif [[ "${action}" == "load-policy" ]]; then
+ load_policy "${tmp_dir}"
else
echo "Unknown action: ${action}"
exit 1
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index 6db1af8fdee7..2bb1f9b3841d 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -1,18 +1,25 @@
// SPDX-License-Identifier: GPL-2.0-only
+#define _GNU_SOURCE
+
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
+#include <sched.h>
#include <arpa/inet.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
#include <linux/err.h>
#include <linux/in.h>
#include <linux/in6.h>
+#include <linux/limits.h>
#include "bpf_util.h"
#include "network_helpers.h"
+#include "test_progs.h"
#define clean_errno() (errno == 0 ? "None" : strerror(errno))
#define log_err(MSG, ...) ({ \
@@ -356,3 +363,82 @@ char *ping_command(int family)
}
return "ping";
}
+
+struct nstoken {
+ int orig_netns_fd;
+};
+
+static int setns_by_fd(int nsfd)
+{
+ int err;
+
+ err = setns(nsfd, CLONE_NEWNET);
+ close(nsfd);
+
+ if (!ASSERT_OK(err, "setns"))
+ return err;
+
+ /* Switch /sys to the new namespace so that e.g. /sys/class/net
+ * reflects the devices in the new namespace.
+ */
+ err = unshare(CLONE_NEWNS);
+ if (!ASSERT_OK(err, "unshare"))
+ return err;
+
+ /* Make our /sys mount private, so the following umount won't
+ * trigger the global umount in case it's shared.
+ */
+ err = mount("none", "/sys", NULL, MS_PRIVATE, NULL);
+ if (!ASSERT_OK(err, "remount private /sys"))
+ return err;
+
+ err = umount2("/sys", MNT_DETACH);
+ if (!ASSERT_OK(err, "umount2 /sys"))
+ return err;
+
+ err = mount("sysfs", "/sys", "sysfs", 0, NULL);
+ if (!ASSERT_OK(err, "mount /sys"))
+ return err;
+
+ err = mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL);
+ if (!ASSERT_OK(err, "mount /sys/fs/bpf"))
+ return err;
+
+ return 0;
+}
+
+struct nstoken *open_netns(const char *name)
+{
+ int nsfd;
+ char nspath[PATH_MAX];
+ int err;
+ struct nstoken *token;
+
+ token = malloc(sizeof(struct nstoken));
+ if (!ASSERT_OK_PTR(token, "malloc token"))
+ return NULL;
+
+ token->orig_netns_fd = open("/proc/self/ns/net", O_RDONLY);
+ if (!ASSERT_GE(token->orig_netns_fd, 0, "open /proc/self/ns/net"))
+ goto fail;
+
+ snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name);
+ nsfd = open(nspath, O_RDONLY | O_CLOEXEC);
+ if (!ASSERT_GE(nsfd, 0, "open netns fd"))
+ goto fail;
+
+ err = setns_by_fd(nsfd);
+ if (!ASSERT_OK(err, "setns_by_fd"))
+ goto fail;
+
+ return token;
+fail:
+ free(token);
+ return NULL;
+}
+
+void close_netns(struct nstoken *token)
+{
+ ASSERT_OK(setns_by_fd(token->orig_netns_fd), "setns_by_fd");
+ free(token);
+}
diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
index d198181a5648..a4b3b2f9877b 100644
--- a/tools/testing/selftests/bpf/network_helpers.h
+++ b/tools/testing/selftests/bpf/network_helpers.h
@@ -55,4 +55,13 @@ int make_sockaddr(int family, const char *addr_str, __u16 port,
struct sockaddr_storage *addr, socklen_t *len);
char *ping_command(int family);
+struct nstoken;
+/**
+ * open_netns() - Switch to specified network namespace by name.
+ *
+ * Returns token with which to restore the original namespace
+ * using close_netns().
+ */
+struct nstoken *open_netns(const char *name);
+void close_netns(struct nstoken *token);
#endif
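
A hedged sketch of the intended open_netns()/close_netns() usage pattern; the namespace name is illustrative and is expected to exist under /var/run/netns:

	struct nstoken *tok;

	tok = open_netns("ns_dst");	/* switch to a previously created netns */
	if (!tok)
		return;			/* helpers already log failures via ASSERT_*() */

	/* ... run test steps inside the namespace ... */

	close_netns(tok);		/* restore the original namespace */
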
diff --git a/tools/testing/selftests/bpf/prog_tests/bind_perm.c b/tools/testing/selftests/bpf/prog_tests/bind_perm.c
index eac71fbb24ce..a1766a298bb7 100644
--- a/tools/testing/selftests/bpf/prog_tests/bind_perm.c
+++ b/tools/testing/selftests/bpf/prog_tests/bind_perm.c
@@ -4,9 +4,9 @@
#include <stdlib.h>
#include <sys/types.h>
#include <sys/socket.h>
-#include <sys/capability.h>
#include "test_progs.h"
+#include "cap_helpers.h"
#include "bind_perm.skel.h"
static int duration;
@@ -49,41 +49,11 @@ close_socket:
close(fd);
}
-bool cap_net_bind_service(cap_flag_value_t flag)
-{
- const cap_value_t cap_net_bind_service = CAP_NET_BIND_SERVICE;
- cap_flag_value_t original_value;
- bool was_effective = false;
- cap_t caps;
-
- caps = cap_get_proc();
- if (CHECK(!caps, "cap_get_proc", "errno %d", errno))
- goto free_caps;
-
- if (CHECK(cap_get_flag(caps, CAP_NET_BIND_SERVICE, CAP_EFFECTIVE,
- &original_value),
- "cap_get_flag", "errno %d", errno))
- goto free_caps;
-
- was_effective = (original_value == CAP_SET);
-
- if (CHECK(cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_bind_service,
- flag),
- "cap_set_flag", "errno %d", errno))
- goto free_caps;
-
- if (CHECK(cap_set_proc(caps), "cap_set_proc", "errno %d", errno))
- goto free_caps;
-
-free_caps:
- CHECK(cap_free(caps), "cap_free", "errno %d", errno);
- return was_effective;
-}
-
void test_bind_perm(void)
{
- bool cap_was_effective;
+ const __u64 net_bind_svc_cap = 1ULL << CAP_NET_BIND_SERVICE;
struct bind_perm *skel;
+ __u64 old_caps = 0;
int cgroup_fd;
if (create_netns())
@@ -105,7 +75,8 @@ void test_bind_perm(void)
if (!ASSERT_OK_PTR(skel, "bind_v6_prog"))
goto close_skeleton;
- cap_was_effective = cap_net_bind_service(CAP_CLEAR);
+ ASSERT_OK(cap_disable_effective(net_bind_svc_cap, &old_caps),
+ "cap_disable_effective");
try_bind(AF_INET, 110, EACCES);
try_bind(AF_INET6, 110, EACCES);
@@ -113,8 +84,9 @@ void test_bind_perm(void)
try_bind(AF_INET, 111, 0);
try_bind(AF_INET6, 111, 0);
- if (cap_was_effective)
- cap_net_bind_service(CAP_SET);
+ if (old_caps & net_bind_svc_cap)
+ ASSERT_OK(cap_enable_effective(net_bind_svc_cap, NULL),
+ "cap_enable_effective");
close_skeleton:
bind_perm__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
index cd10df6cd0fc..923a6139b2d8 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -7,6 +7,7 @@
#include <unistd.h>
#include <test_progs.h>
#include "test_bpf_cookie.skel.h"
+#include "kprobe_multi.skel.h"
/* uprobe attach point */
static void trigger_func(void)
@@ -63,6 +64,178 @@ cleanup:
bpf_link__destroy(retlink2);
}
+static void kprobe_multi_test_run(struct kprobe_multi *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ prog_fd = bpf_program__fd(skel->progs.trigger);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result");
+ ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result");
+ ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result");
+ ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result");
+ ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result");
+ ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result");
+ ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result");
+ ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result");
+
+ ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result");
+ ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result");
+ ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result");
+ ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result");
+ ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result");
+ ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result");
+ ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result");
+ ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result");
+}
+
+static void kprobe_multi_link_api_subtest(void)
+{
+ int prog_fd, link1_fd = -1, link2_fd = -1;
+ struct kprobe_multi *skel = NULL;
+ LIBBPF_OPTS(bpf_link_create_opts, opts);
+ unsigned long long addrs[8];
+ __u64 cookies[8];
+
+ if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
+ goto cleanup;
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+ skel->bss->test_cookie = true;
+
+#define GET_ADDR(__sym, __addr) ({ \
+ __addr = ksym_get_addr(__sym); \
+ if (!ASSERT_NEQ(__addr, 0, "ksym_get_addr " #__sym)) \
+ goto cleanup; \
+})
+
+ GET_ADDR("bpf_fentry_test1", addrs[0]);
+ GET_ADDR("bpf_fentry_test2", addrs[1]);
+ GET_ADDR("bpf_fentry_test3", addrs[2]);
+ GET_ADDR("bpf_fentry_test4", addrs[3]);
+ GET_ADDR("bpf_fentry_test5", addrs[4]);
+ GET_ADDR("bpf_fentry_test6", addrs[5]);
+ GET_ADDR("bpf_fentry_test7", addrs[6]);
+ GET_ADDR("bpf_fentry_test8", addrs[7]);
+
+#undef GET_ADDR
+
+ cookies[0] = 1;
+ cookies[1] = 2;
+ cookies[2] = 3;
+ cookies[3] = 4;
+ cookies[4] = 5;
+ cookies[5] = 6;
+ cookies[6] = 7;
+ cookies[7] = 8;
+
+ opts.kprobe_multi.addrs = (const unsigned long *) &addrs;
+ opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
+ opts.kprobe_multi.cookies = (const __u64 *) &cookies;
+ prog_fd = bpf_program__fd(skel->progs.test_kprobe);
+
+ link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
+ if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
+ goto cleanup;
+
+ cookies[0] = 8;
+ cookies[1] = 7;
+ cookies[2] = 6;
+ cookies[3] = 5;
+ cookies[4] = 4;
+ cookies[5] = 3;
+ cookies[6] = 2;
+ cookies[7] = 1;
+
+ opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
+ prog_fd = bpf_program__fd(skel->progs.test_kretprobe);
+
+ link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
+ if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
+ goto cleanup;
+
+ kprobe_multi_test_run(skel);
+
+cleanup:
+ close(link1_fd);
+ close(link2_fd);
+ kprobe_multi__destroy(skel);
+}
+
+static void kprobe_multi_attach_api_subtest(void)
+{
+ struct bpf_link *link1 = NULL, *link2 = NULL;
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct kprobe_multi *skel = NULL;
+ const char *syms[8] = {
+ "bpf_fentry_test1",
+ "bpf_fentry_test2",
+ "bpf_fentry_test3",
+ "bpf_fentry_test4",
+ "bpf_fentry_test5",
+ "bpf_fentry_test6",
+ "bpf_fentry_test7",
+ "bpf_fentry_test8",
+ };
+ __u64 cookies[8];
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+ skel->bss->test_cookie = true;
+
+ cookies[0] = 1;
+ cookies[1] = 2;
+ cookies[2] = 3;
+ cookies[3] = 4;
+ cookies[4] = 5;
+ cookies[5] = 6;
+ cookies[6] = 7;
+ cookies[7] = 8;
+
+ opts.syms = syms;
+ opts.cnt = ARRAY_SIZE(syms);
+ opts.cookies = cookies;
+
+ link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+ NULL, &opts);
+ if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
+ goto cleanup;
+
+ cookies[0] = 8;
+ cookies[1] = 7;
+ cookies[2] = 6;
+ cookies[3] = 5;
+ cookies[4] = 4;
+ cookies[5] = 3;
+ cookies[6] = 2;
+ cookies[7] = 1;
+
+ opts.retprobe = true;
+
+ link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe,
+ NULL, &opts);
+ if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts"))
+ goto cleanup;
+
+ kprobe_multi_test_run(skel);
+
+cleanup:
+ bpf_link__destroy(link2);
+ bpf_link__destroy(link1);
+ kprobe_multi__destroy(skel);
+}
static void uprobe_subtest(struct test_bpf_cookie *skel)
{
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
@@ -199,7 +372,7 @@ static void pe_subtest(struct test_bpf_cookie *skel)
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
- attr.sample_freq = 4000;
+ attr.sample_freq = 1000;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
if (!ASSERT_GE(pfd, 0, "perf_fd"))
goto cleanup;
@@ -249,6 +422,10 @@ void test_bpf_cookie(void)
if (test__start_subtest("kprobe"))
kprobe_subtest(skel);
+ if (test__start_subtest("multi_kprobe_link_api"))
+ kprobe_multi_link_api_subtest();
+ if (test__start_subtest("multi_kprobe_attach_api"))
+ kprobe_multi_attach_api_subtest();
if (test__start_subtest("uprobe"))
uprobe_subtest(skel);
if (test__start_subtest("tracepoint"))
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_tag.c b/tools/testing/selftests/bpf/prog_tests/btf_tag.c
index f7560b54a6bb..071430cd54de 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_tag.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_tag.c
@@ -10,6 +10,7 @@ struct btf_type_tag_test {
};
#include "btf_type_tag.skel.h"
#include "btf_type_tag_user.skel.h"
+#include "btf_type_tag_percpu.skel.h"
static void test_btf_decl_tag(void)
{
@@ -43,38 +44,81 @@ static void test_btf_type_tag(void)
btf_type_tag__destroy(skel);
}
-static void test_btf_type_tag_mod_user(bool load_test_user1)
+/* Loads vmlinux BTF as well as module BTF. If the caller passes NULL as
+ * module_btf, module BTF is not loaded.
+ *
+ * Returns 0 on success.
+ * Returns -1 on error; in that case, any BTF that was loaded is freed and
+ * the output parameters are reset to NULL.
+ */
+static int load_btfs(struct btf **vmlinux_btf, struct btf **module_btf,
+ bool needs_vmlinux_tag)
{
const char *module_name = "bpf_testmod";
- struct btf *vmlinux_btf, *module_btf;
- struct btf_type_tag_user *skel;
__s32 type_id;
- int err;
if (!env.has_testmod) {
test__skip();
- return;
+ return -1;
}
- /* skip the test if the module does not have __user tags */
- vmlinux_btf = btf__load_vmlinux_btf();
- if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF"))
- return;
+ *vmlinux_btf = btf__load_vmlinux_btf();
+ if (!ASSERT_OK_PTR(*vmlinux_btf, "could not load vmlinux BTF"))
+ return -1;
+
+ if (!needs_vmlinux_tag)
+ goto load_module_btf;
- module_btf = btf__load_module_btf(module_name, vmlinux_btf);
- if (!ASSERT_OK_PTR(module_btf, "could not load module BTF"))
+ /* skip the test if the vmlinux does not have __user tags */
+ type_id = btf__find_by_name_kind(*vmlinux_btf, "user", BTF_KIND_TYPE_TAG);
+ if (type_id <= 0) {
+ printf("%s:SKIP: btf_type_tag attribute not in vmlinux btf", __func__);
+ test__skip();
goto free_vmlinux_btf;
+ }
- type_id = btf__find_by_name_kind(module_btf, "user", BTF_KIND_TYPE_TAG);
+load_module_btf:
+ /* skip loading module_btf, if not requested by caller */
+ if (!module_btf)
+ return 0;
+
+ *module_btf = btf__load_module_btf(module_name, *vmlinux_btf);
+ if (!ASSERT_OK_PTR(*module_btf, "could not load module BTF"))
+ goto free_vmlinux_btf;
+
+ /* skip the test if the module does not have __user tags */
+ type_id = btf__find_by_name_kind(*module_btf, "user", BTF_KIND_TYPE_TAG);
if (type_id <= 0) {
printf("%s:SKIP: btf_type_tag attribute not in %s", __func__, module_name);
test__skip();
goto free_module_btf;
}
+ return 0;
+
+free_module_btf:
+ btf__free(*module_btf);
+free_vmlinux_btf:
+ btf__free(*vmlinux_btf);
+
+ *vmlinux_btf = NULL;
+ if (module_btf)
+ *module_btf = NULL;
+ return -1;
+}
+
+static void test_btf_type_tag_mod_user(bool load_test_user1)
+{
+ struct btf *vmlinux_btf = NULL, *module_btf = NULL;
+ struct btf_type_tag_user *skel;
+ int err;
+
+ if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false))
+ return;
+
skel = btf_type_tag_user__open();
if (!ASSERT_OK_PTR(skel, "btf_type_tag_user"))
- goto free_module_btf;
+ goto cleanup;
bpf_program__set_autoload(skel->progs.test_sys_getsockname, false);
if (load_test_user1)
@@ -87,34 +131,23 @@ static void test_btf_type_tag_mod_user(bool load_test_user1)
btf_type_tag_user__destroy(skel);
-free_module_btf:
+cleanup:
btf__free(module_btf);
-free_vmlinux_btf:
btf__free(vmlinux_btf);
}
static void test_btf_type_tag_vmlinux_user(void)
{
struct btf_type_tag_user *skel;
- struct btf *vmlinux_btf;
- __s32 type_id;
+ struct btf *vmlinux_btf = NULL;
int err;
- /* skip the test if the vmlinux does not have __user tags */
- vmlinux_btf = btf__load_vmlinux_btf();
- if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF"))
+ if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true))
return;
- type_id = btf__find_by_name_kind(vmlinux_btf, "user", BTF_KIND_TYPE_TAG);
- if (type_id <= 0) {
- printf("%s:SKIP: btf_type_tag attribute not in vmlinux btf", __func__);
- test__skip();
- goto free_vmlinux_btf;
- }
-
skel = btf_type_tag_user__open();
if (!ASSERT_OK_PTR(skel, "btf_type_tag_user"))
- goto free_vmlinux_btf;
+ goto cleanup;
bpf_program__set_autoload(skel->progs.test_user2, false);
bpf_program__set_autoload(skel->progs.test_user1, false);
@@ -124,7 +157,70 @@ static void test_btf_type_tag_vmlinux_user(void)
btf_type_tag_user__destroy(skel);
-free_vmlinux_btf:
+cleanup:
+ btf__free(vmlinux_btf);
+}
+
+static void test_btf_type_tag_mod_percpu(bool load_test_percpu1)
+{
+ struct btf *vmlinux_btf, *module_btf;
+ struct btf_type_tag_percpu *skel;
+ int err;
+
+ if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false))
+ return;
+
+ skel = btf_type_tag_percpu__open();
+ if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu"))
+ goto cleanup;
+
+ bpf_program__set_autoload(skel->progs.test_percpu_load, false);
+ bpf_program__set_autoload(skel->progs.test_percpu_helper, false);
+ if (load_test_percpu1)
+ bpf_program__set_autoload(skel->progs.test_percpu2, false);
+ else
+ bpf_program__set_autoload(skel->progs.test_percpu1, false);
+
+ err = btf_type_tag_percpu__load(skel);
+ ASSERT_ERR(err, "btf_type_tag_percpu");
+
+ btf_type_tag_percpu__destroy(skel);
+
+cleanup:
+ btf__free(module_btf);
+ btf__free(vmlinux_btf);
+}
+
+static void test_btf_type_tag_vmlinux_percpu(bool load_test)
+{
+ struct btf_type_tag_percpu *skel;
+ struct btf *vmlinux_btf = NULL;
+ int err;
+
+ if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true))
+ return;
+
+ skel = btf_type_tag_percpu__open();
+ if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu"))
+ goto cleanup;
+
+ bpf_program__set_autoload(skel->progs.test_percpu2, false);
+ bpf_program__set_autoload(skel->progs.test_percpu1, false);
+ if (load_test) {
+ bpf_program__set_autoload(skel->progs.test_percpu_helper, false);
+
+ err = btf_type_tag_percpu__load(skel);
+ ASSERT_ERR(err, "btf_type_tag_percpu_load");
+ } else {
+ bpf_program__set_autoload(skel->progs.test_percpu_load, false);
+
+ err = btf_type_tag_percpu__load(skel);
+ ASSERT_OK(err, "btf_type_tag_percpu_helper");
+ }
+
+ btf_type_tag_percpu__destroy(skel);
+
+cleanup:
btf__free(vmlinux_btf);
}
@@ -134,10 +230,20 @@ void test_btf_tag(void)
test_btf_decl_tag();
if (test__start_subtest("btf_type_tag"))
test_btf_type_tag();
+
if (test__start_subtest("btf_type_tag_user_mod1"))
test_btf_type_tag_mod_user(true);
if (test__start_subtest("btf_type_tag_user_mod2"))
test_btf_type_tag_mod_user(false);
if (test__start_subtest("btf_type_tag_sys_user_vmlinux"))
test_btf_type_tag_vmlinux_user();
+
+ if (test__start_subtest("btf_type_tag_percpu_mod1"))
+ test_btf_type_tag_mod_percpu(true);
+ if (test__start_subtest("btf_type_tag_percpu_mod2"))
+ test_btf_type_tag_mod_percpu(false);
+ if (test__start_subtest("btf_type_tag_percpu_vmlinux_load"))
+ test_btf_type_tag_vmlinux_percpu(true);
+ if (test__start_subtest("btf_type_tag_percpu_vmlinux_helper"))
+ test_btf_type_tag_vmlinux_percpu(false);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
index 858916d11e2e..9367bd2f0ae1 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
@@ -14,7 +14,7 @@ static int prog_load(void)
BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = 1 */
BPF_EXIT_INSN(),
};
- size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+ size_t insns_cnt = ARRAY_SIZE(prog);
return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
index 38b3c47293da..db0b7bac78d1 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
@@ -63,7 +63,7 @@ static int prog_load_cnt(int verdict, int val)
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
- size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+ size_t insns_cnt = ARRAY_SIZE(prog);
int ret;
ret = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
index 356547e849e2..9421a5b7f4e1 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
@@ -16,7 +16,7 @@ static int prog_load(int verdict)
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
- size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+ size_t insns_cnt = ARRAY_SIZE(prog);
return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
diff --git a/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c b/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c
new file mode 100644
index 000000000000..b2dfc5954aea
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include <test_progs.h>
+#include "test_custom_sec_handlers.skel.h"
+
+#define COOKIE_ABC1 1
+#define COOKIE_ABC2 2
+#define COOKIE_CUSTOM 3
+#define COOKIE_FALLBACK 4
+#define COOKIE_KPROBE 5
+
+static int custom_setup_prog(struct bpf_program *prog, long cookie)
+{
+ if (cookie == COOKIE_ABC1)
+ bpf_program__set_autoload(prog, false);
+
+ return 0;
+}
+
+static int custom_prepare_load_prog(struct bpf_program *prog,
+ struct bpf_prog_load_opts *opts, long cookie)
+{
+ if (cookie == COOKIE_FALLBACK)
+ opts->prog_flags |= BPF_F_SLEEPABLE;
+ else if (cookie == COOKIE_ABC1)
+ ASSERT_FALSE(true, "unexpected preload for abc");
+
+ return 0;
+}
+
+static int custom_attach_prog(const struct bpf_program *prog, long cookie,
+ struct bpf_link **link)
+{
+ switch (cookie) {
+ case COOKIE_ABC2:
+ *link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
+ return libbpf_get_error(*link);
+ case COOKIE_CUSTOM:
+ *link = bpf_program__attach_tracepoint(prog, "syscalls", "sys_enter_nanosleep");
+ return libbpf_get_error(*link);
+ case COOKIE_KPROBE:
+ case COOKIE_FALLBACK:
+ /* no auto-attach for SEC("xyz") and SEC("kprobe") */
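+ /* returning 0 while leaving *link NULL tells libbpf to silently skip
+ * auto-attach for these programs; an explicit bpf_program__attach()
+ * on them then fails with -EOPNOTSUPP, which is checked further down
+ */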
+ *link = NULL;
+ return 0;
+ default:
+ ASSERT_FALSE(true, "unexpected cookie");
+ return -EINVAL;
+ }
+}
+
+static int abc1_id;
+static int abc2_id;
+static int custom_id;
+static int fallback_id;
+static int kprobe_id;
+
+__attribute__((constructor))
+static void register_sec_handlers(void)
+{
+ LIBBPF_OPTS(libbpf_prog_handler_opts, abc1_opts,
+ .cookie = COOKIE_ABC1,
+ .prog_setup_fn = custom_setup_prog,
+ .prog_prepare_load_fn = custom_prepare_load_prog,
+ .prog_attach_fn = NULL,
+ );
+ LIBBPF_OPTS(libbpf_prog_handler_opts, abc2_opts,
+ .cookie = COOKIE_ABC2,
+ .prog_setup_fn = custom_setup_prog,
+ .prog_prepare_load_fn = custom_prepare_load_prog,
+ .prog_attach_fn = custom_attach_prog,
+ );
+ LIBBPF_OPTS(libbpf_prog_handler_opts, custom_opts,
+ .cookie = COOKIE_CUSTOM,
+ .prog_setup_fn = NULL,
+ .prog_prepare_load_fn = NULL,
+ .prog_attach_fn = custom_attach_prog,
+ );
+
+ abc1_id = libbpf_register_prog_handler("abc", BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc1_opts);
+ abc2_id = libbpf_register_prog_handler("abc/", BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc2_opts);
+ custom_id = libbpf_register_prog_handler("custom+", BPF_PROG_TYPE_TRACEPOINT, 0, &custom_opts);
+}
+
+__attribute__((destructor))
+static void unregister_sec_handlers(void)
+{
+ libbpf_unregister_prog_handler(abc1_id);
+ libbpf_unregister_prog_handler(abc2_id);
+ libbpf_unregister_prog_handler(custom_id);
+}
+
+void test_custom_sec_handlers(void)
+{
+ LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
+ .prog_setup_fn = custom_setup_prog,
+ .prog_prepare_load_fn = custom_prepare_load_prog,
+ .prog_attach_fn = custom_attach_prog,
+ );
+ struct test_custom_sec_handlers* skel;
+ int err;
+
+ ASSERT_GT(abc1_id, 0, "abc1_id");
+ ASSERT_GT(abc2_id, 0, "abc2_id");
+ ASSERT_GT(custom_id, 0, "custom_id");
+
+ /* override libbpf's handling of SEC("kprobe/...") but also allow pure
+ * SEC("kprobe") due to "kprobe+" specifier. Register it as
+ * TRACEPOINT, just for fun.
+ */
+ opts.cookie = COOKIE_KPROBE;
+ kprobe_id = libbpf_register_prog_handler("kprobe+", BPF_PROG_TYPE_TRACEPOINT, 0, &opts);
+ /* fallback treats everything as BPF_PROG_TYPE_SYSCALL program to test
+ * setting custom BPF_F_SLEEPABLE bit in preload handler
+ */
+ opts.cookie = COOKIE_FALLBACK;
+ fallback_id = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_SYSCALL, 0, &opts);
+
+ if (!ASSERT_GT(fallback_id, 0, "fallback_id") /* || !ASSERT_GT(kprobe_id, 0, "kprobe_id")*/) {
+ if (fallback_id > 0)
+ libbpf_unregister_prog_handler(fallback_id);
+ if (kprobe_id > 0)
+ libbpf_unregister_prog_handler(kprobe_id);
+ return;
+ }
+
+ /* open skeleton and validate assumptions */
+ skel = test_custom_sec_handlers__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__type(skel->progs.abc1), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc1_type");
+ ASSERT_FALSE(bpf_program__autoload(skel->progs.abc1), "abc1_autoload");
+
+ ASSERT_EQ(bpf_program__type(skel->progs.abc2), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc2_type");
+ ASSERT_EQ(bpf_program__type(skel->progs.custom1), BPF_PROG_TYPE_TRACEPOINT, "custom1_type");
+ ASSERT_EQ(bpf_program__type(skel->progs.custom2), BPF_PROG_TYPE_TRACEPOINT, "custom2_type");
+ ASSERT_EQ(bpf_program__type(skel->progs.kprobe1), BPF_PROG_TYPE_TRACEPOINT, "kprobe1_type");
+ ASSERT_EQ(bpf_program__type(skel->progs.xyz), BPF_PROG_TYPE_SYSCALL, "xyz_type");
+
+ skel->rodata->my_pid = getpid();
+
+ /* now attempt to load everything */
+ err = test_custom_sec_handlers__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ /* now try to auto-attach everything */
+ err = test_custom_sec_handlers__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ skel->links.xyz = bpf_program__attach(skel->progs.kprobe1);
+ ASSERT_EQ(errno, EOPNOTSUPP, "xyz_attach_err");
+ ASSERT_ERR_PTR(skel->links.xyz, "xyz_attach");
+
+ /* trigger programs */
+ usleep(1);
+
+ /* SEC("abc") is set to not auto-loaded */
+ ASSERT_FALSE(skel->bss->abc1_called, "abc1_called");
+ ASSERT_TRUE(skel->bss->abc2_called, "abc2_called");
+ ASSERT_TRUE(skel->bss->custom1_called, "custom1_called");
+ ASSERT_TRUE(skel->bss->custom2_called, "custom2_called");
+ /* SEC("kprobe") shouldn't be auto-attached */
+ ASSERT_FALSE(skel->bss->kprobe1_called, "kprobe1_called");
+ /* SEC("xyz") shouldn't be auto-attached */
+ ASSERT_FALSE(skel->bss->xyz_called, "xyz_called");
+
+cleanup:
+ test_custom_sec_handlers__destroy(skel);
+
+ ASSERT_OK(libbpf_unregister_prog_handler(fallback_id), "unregister_fallback");
+ ASSERT_OK(libbpf_unregister_prog_handler(kprobe_id), "unregister_kprobe");
+}
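
The BPF object referenced above (progs/test_custom_sec_handlers.c) is listed in the diffstat but not included in this hunk. As a rough illustration of how programs are written against the custom section names registered by this test, a minimal sketch follows; the program and variable names (abc1, custom1, my_pid, abc1_called, custom1_called) mirror the skeleton fields used above, while the bodies and the exact section strings are assumptions, not the actual selftest source.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

const volatile int my_pid;

int abc1_called;
int custom1_called;

/* exact match for the "abc" handler: custom_setup_prog() disables
 * autoload for this one, so it is never loaded or run
 */
SEC("abc")
int abc1(void *ctx)
{
	abc1_called = 1;
	return 0;
}

/* matches the "custom+" handler: custom_attach_prog() attaches it to the
 * syscalls:sys_enter_nanosleep tracepoint, triggered by the usleep() above
 */
SEC("custom/whatever")
int custom1(void *ctx)
{
	if (bpf_get_current_pid_tgid() >> 32 == my_pid)
		custom1_called = 1;
	return 0;
}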
diff --git a/tools/testing/selftests/bpf/prog_tests/find_vma.c b/tools/testing/selftests/bpf/prog_tests/find_vma.c
index b74b3c0c555a..5165b38f0e59 100644
--- a/tools/testing/selftests/bpf/prog_tests/find_vma.c
+++ b/tools/testing/selftests/bpf/prog_tests/find_vma.c
@@ -7,12 +7,14 @@
#include "find_vma_fail1.skel.h"
#include "find_vma_fail2.skel.h"
-static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret)
+static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret, bool need_test)
{
- ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec");
- ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret");
- ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret");
- ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs");
+ if (need_test) {
+ ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec");
+ ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret");
+ ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret");
+ ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs");
+ }
skel->bss->found_vm_exec = 0;
skel->data->find_addr_ret = -1;
@@ -30,17 +32,26 @@ static int open_pe(void)
attr.type = PERF_TYPE_HARDWARE;
attr.config = PERF_COUNT_HW_CPU_CYCLES;
attr.freq = 1;
- attr.sample_freq = 4000;
+ attr.sample_freq = 1000;
pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
return pfd >= 0 ? pfd : -errno;
}
+static bool find_vma_pe_condition(struct find_vma *skel)
+{
+ return skel->bss->found_vm_exec == 0 ||
+ skel->data->find_addr_ret != 0 ||
+ skel->data->find_zero_ret == -1 ||
+ strcmp(skel->bss->d_iname, "test_progs") != 0;
+}
+
static void test_find_vma_pe(struct find_vma *skel)
{
struct bpf_link *link = NULL;
volatile int j = 0;
int pfd, i;
+ const int one_bn = 1000000000;
pfd = open_pe();
if (pfd < 0) {
@@ -57,10 +68,10 @@ static void test_find_vma_pe(struct find_vma *skel)
if (!ASSERT_OK_PTR(link, "attach_perf_event"))
goto cleanup;
- for (i = 0; i < 1000000; ++i)
+ for (i = 0; i < one_bn && find_vma_pe_condition(skel); ++i)
++j;
- test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */);
+ test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */, i == one_bn);
cleanup:
bpf_link__destroy(link);
close(pfd);
@@ -75,7 +86,7 @@ static void test_find_vma_kprobe(struct find_vma *skel)
return;
getpgid(skel->bss->target_pid);
- test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */);
+ test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */, true);
}
static void test_illegal_write_vma(void)
@@ -108,7 +119,6 @@ void serial_test_find_vma(void)
skel->bss->addr = (__u64)(uintptr_t)test_find_vma_pe;
test_find_vma_pe(skel);
- usleep(100000); /* allow the irq_work to finish */
test_find_vma_kprobe(skel);
find_vma__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c
index 6fb3d3155c35..027685858925 100644
--- a/tools/testing/selftests/bpf/prog_tests/global_data.c
+++ b/tools/testing/selftests/bpf/prog_tests/global_data.c
@@ -29,7 +29,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
{ "relocate .rodata reference", 10, ~0 },
};
- for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);
CHECK(err || num != tests[i].num, tests[i].name,
"err %d result %llx expected %llx\n",
@@ -58,7 +58,7 @@ static void test_global_data_string(struct bpf_object *obj, __u32 duration)
{ "relocate .bss reference", 4, "\0\0hello" },
};
- for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, str);
CHECK(err || memcmp(str, tests[i].str, sizeof(str)),
tests[i].name, "err %d result \'%s\' expected \'%s\'\n",
@@ -92,7 +92,7 @@ static void test_global_data_struct(struct bpf_object *obj, __u32 duration)
{ "relocate .data reference", 3, { 41, 0xeeeeefef, 0x2111111111111111ULL, } },
};
- for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, &val);
CHECK(err || memcmp(&val, &tests[i].val, sizeof(val)),
tests[i].name, "err %d result { %u, %u, %llu } expected { %u, %u, %llu }\n",
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
new file mode 100644
index 000000000000..b9876b55fc0c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "kprobe_multi.skel.h"
+#include "trace_helpers.h"
+
+static void kprobe_multi_test_run(struct kprobe_multi *skel, bool test_return)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ prog_fd = bpf_program__fd(skel->progs.trigger);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result");
+ ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result");
+ ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result");
+ ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result");
+ ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result");
+ ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result");
+ ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result");
+ ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result");
+
+ if (test_return) {
+ ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result");
+ ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result");
+ ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result");
+ ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result");
+ ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result");
+ ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result");
+ ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result");
+ ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result");
+ }
+}
+
+static void test_skel_api(void)
+{
+ struct kprobe_multi *skel = NULL;
+ int err;
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_multi__open_and_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+ err = kprobe_multi__attach(skel);
+ if (!ASSERT_OK(err, "kprobe_multi__attach"))
+ goto cleanup;
+
+ kprobe_multi_test_run(skel, true);
+
+cleanup:
+ kprobe_multi__destroy(skel);
+}
+
+static void test_link_api(struct bpf_link_create_opts *opts)
+{
+ int prog_fd, link1_fd = -1, link2_fd = -1;
+ struct kprobe_multi *skel = NULL;
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+ prog_fd = bpf_program__fd(skel->progs.test_kprobe);
+ link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts);
+ if (!ASSERT_GE(link1_fd, 0, "link_fd"))
+ goto cleanup;
+
+ opts->kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
+ prog_fd = bpf_program__fd(skel->progs.test_kretprobe);
+ link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts);
+ if (!ASSERT_GE(link2_fd, 0, "link_fd"))
+ goto cleanup;
+
+ kprobe_multi_test_run(skel, true);
+
+cleanup:
+ if (link1_fd != -1)
+ close(link1_fd);
+ if (link2_fd != -1)
+ close(link2_fd);
+ kprobe_multi__destroy(skel);
+}
+
+#define GET_ADDR(__sym, __addr) ({ \
+ __addr = ksym_get_addr(__sym); \
+ if (!ASSERT_NEQ(__addr, 0, "kallsyms load failed for " #__sym)) \
+ return; \
+})
+
+static void test_link_api_addrs(void)
+{
+ LIBBPF_OPTS(bpf_link_create_opts, opts);
+ unsigned long long addrs[8];
+
+ GET_ADDR("bpf_fentry_test1", addrs[0]);
+ GET_ADDR("bpf_fentry_test2", addrs[1]);
+ GET_ADDR("bpf_fentry_test3", addrs[2]);
+ GET_ADDR("bpf_fentry_test4", addrs[3]);
+ GET_ADDR("bpf_fentry_test5", addrs[4]);
+ GET_ADDR("bpf_fentry_test6", addrs[5]);
+ GET_ADDR("bpf_fentry_test7", addrs[6]);
+ GET_ADDR("bpf_fentry_test8", addrs[7]);
+
+ opts.kprobe_multi.addrs = (const unsigned long*) addrs;
+ opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
+ test_link_api(&opts);
+}
+
+static void test_link_api_syms(void)
+{
+ LIBBPF_OPTS(bpf_link_create_opts, opts);
+ const char *syms[8] = {
+ "bpf_fentry_test1",
+ "bpf_fentry_test2",
+ "bpf_fentry_test3",
+ "bpf_fentry_test4",
+ "bpf_fentry_test5",
+ "bpf_fentry_test6",
+ "bpf_fentry_test7",
+ "bpf_fentry_test8",
+ };
+
+ opts.kprobe_multi.syms = syms;
+ opts.kprobe_multi.cnt = ARRAY_SIZE(syms);
+ test_link_api(&opts);
+}
+
+static void
+test_attach_api(const char *pattern, struct bpf_kprobe_multi_opts *opts)
+{
+ struct bpf_link *link1 = NULL, *link2 = NULL;
+ struct kprobe_multi *skel = NULL;
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+ link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+ pattern, opts);
+ if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
+ goto cleanup;
+
+ if (opts) {
+ opts->retprobe = true;
+ link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe,
+ pattern, opts);
+ if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts"))
+ goto cleanup;
+ }
+
+ kprobe_multi_test_run(skel, !!opts);
+
+cleanup:
+ bpf_link__destroy(link2);
+ bpf_link__destroy(link1);
+ kprobe_multi__destroy(skel);
+}
+
+static void test_attach_api_pattern(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+
+ test_attach_api("bpf_fentry_test*", &opts);
+ test_attach_api("bpf_fentry_test?", NULL);
+}
+
+static void test_attach_api_addrs(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ unsigned long long addrs[8];
+
+ GET_ADDR("bpf_fentry_test1", addrs[0]);
+ GET_ADDR("bpf_fentry_test2", addrs[1]);
+ GET_ADDR("bpf_fentry_test3", addrs[2]);
+ GET_ADDR("bpf_fentry_test4", addrs[3]);
+ GET_ADDR("bpf_fentry_test5", addrs[4]);
+ GET_ADDR("bpf_fentry_test6", addrs[5]);
+ GET_ADDR("bpf_fentry_test7", addrs[6]);
+ GET_ADDR("bpf_fentry_test8", addrs[7]);
+
+ opts.addrs = (const unsigned long *) addrs;
+ opts.cnt = ARRAY_SIZE(addrs);
+ test_attach_api(NULL, &opts);
+}
+
+static void test_attach_api_syms(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ const char *syms[8] = {
+ "bpf_fentry_test1",
+ "bpf_fentry_test2",
+ "bpf_fentry_test3",
+ "bpf_fentry_test4",
+ "bpf_fentry_test5",
+ "bpf_fentry_test6",
+ "bpf_fentry_test7",
+ "bpf_fentry_test8",
+ };
+
+ opts.syms = syms;
+ opts.cnt = ARRAY_SIZE(syms);
+ test_attach_api(NULL, &opts);
+}
+
+static void test_attach_api_fails(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ struct kprobe_multi *skel = NULL;
+ struct bpf_link *link = NULL;
+ unsigned long long addrs[2];
+ const char *syms[2] = {
+ "bpf_fentry_test1",
+ "bpf_fentry_test2",
+ };
+ __u64 cookies[2];
+
+ addrs[0] = ksym_get_addr("bpf_fentry_test1");
+ addrs[1] = ksym_get_addr("bpf_fentry_test2");
+
+ if (!ASSERT_FALSE(!addrs[0] || !addrs[1], "ksym_get_addr"))
+ goto cleanup;
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+
+ /* fail_1 - pattern and opts NULL */
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+ NULL, NULL);
+ if (!ASSERT_ERR_PTR(link, "fail_1"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_1_error"))
+ goto cleanup;
+
+ /* fail_2 - both addrs and syms set */
+ opts.addrs = (const unsigned long *) addrs;
+ opts.syms = syms;
+ opts.cnt = ARRAY_SIZE(syms);
+ opts.cookies = NULL;
+
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+ NULL, &opts);
+ if (!ASSERT_ERR_PTR(link, "fail_2"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_2_error"))
+ goto cleanup;
+
+ /* fail_3 - pattern and addrs set */
+ opts.addrs = (const unsigned long *) addrs;
+ opts.syms = NULL;
+ opts.cnt = ARRAY_SIZE(syms);
+ opts.cookies = NULL;
+
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+ "ksys_*", &opts);
+ if (!ASSERT_ERR_PTR(link, "fail_3"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_3_error"))
+ goto cleanup;
+
+ /* fail_4 - pattern and cnt set */
+ opts.addrs = NULL;
+ opts.syms = NULL;
+ opts.cnt = ARRAY_SIZE(syms);
+ opts.cookies = NULL;
+
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+ "ksys_*", &opts);
+ if (!ASSERT_ERR_PTR(link, "fail_4"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_4_error"))
+ goto cleanup;
+
+ /* fail_5 - pattern and cookies */
+ opts.addrs = NULL;
+ opts.syms = NULL;
+ opts.cnt = 0;
+ opts.cookies = cookies;
+
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+ "ksys_*", &opts);
+ if (!ASSERT_ERR_PTR(link, "fail_5"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_5_error"))
+ goto cleanup;
+
+cleanup:
+ bpf_link__destroy(link);
+ kprobe_multi__destroy(skel);
+}
+
+void test_kprobe_multi_test(void)
+{
+ if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
+ return;
+
+ if (test__start_subtest("skel_api"))
+ test_skel_api();
+ if (test__start_subtest("link_api_addrs"))
+ test_link_api_syms();
+ if (test__start_subtest("link_api_syms"))
+ test_link_api_addrs();
+ if (test__start_subtest("attach_api_pattern"))
+ test_attach_api_pattern();
+ if (test__start_subtest("attach_api_addrs"))
+ test_attach_api_addrs();
+ if (test__start_subtest("attach_api_syms"))
+ test_attach_api_syms();
+ if (test__start_subtest("attach_api_fails"))
+ test_attach_api_fails();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/obj_name.c b/tools/testing/selftests/bpf/prog_tests/obj_name.c
index 6194b776a28b..7093edca6e08 100644
--- a/tools/testing/selftests/bpf/prog_tests/obj_name.c
+++ b/tools/testing/selftests/bpf/prog_tests/obj_name.c
@@ -20,7 +20,7 @@ void test_obj_name(void)
__u32 duration = 0;
int i;
- for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
size_t name_len = strlen(tests[i].name) + 1;
union bpf_attr attr;
size_t ncopy;
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_branches.c b/tools/testing/selftests/bpf/prog_tests/perf_branches.c
index 12c4f45cee1a..bc24f83339d6 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_branches.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_branches.c
@@ -110,7 +110,7 @@ static void test_perf_branches_hw(void)
attr.type = PERF_TYPE_HARDWARE;
attr.config = PERF_COUNT_HW_CPU_CYCLES;
attr.freq = 1;
- attr.sample_freq = 4000;
+ attr.sample_freq = 1000;
attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
@@ -151,7 +151,7 @@ static void test_perf_branches_no_hw(void)
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
- attr.sample_freq = 4000;
+ attr.sample_freq = 1000;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
if (CHECK(pfd < 0, "perf_event_open", "err %d\n", pfd))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c
index ede07344f264..224eba6fef2e 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_link.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c
@@ -39,7 +39,7 @@ void serial_test_perf_link(void)
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
- attr.sample_freq = 4000;
+ attr.sample_freq = 1000;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
if (!ASSERT_GE(pfd, 0, "perf_fd"))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index 776916b61c40..d71226e34c34 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -4,11 +4,11 @@
#include <sys/resource.h>
#include "test_send_signal_kern.skel.h"
-int sigusr1_received = 0;
+static int sigusr1_received;
static void sigusr1_handler(int signum)
{
- sigusr1_received++;
+ sigusr1_received = 1;
}
static void test_send_signal_common(struct perf_event_attr *attr,
@@ -40,9 +40,10 @@ static void test_send_signal_common(struct perf_event_attr *attr,
if (pid == 0) {
int old_prio;
+ volatile int j = 0;
/* install signal handler and notify parent */
- signal(SIGUSR1, sigusr1_handler);
+ ASSERT_NEQ(signal(SIGUSR1, sigusr1_handler), SIG_ERR, "signal");
close(pipe_c2p[0]); /* close read */
close(pipe_p2c[1]); /* close write */
@@ -63,9 +64,11 @@ static void test_send_signal_common(struct perf_event_attr *attr,
ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read");
/* wait a little for signal handler */
- sleep(1);
+ for (int i = 0; i < 100000000 && !sigusr1_received; i++)
+ j /= i + j + 1;
buf[0] = sigusr1_received ? '2' : '0';
+ ASSERT_EQ(sigusr1_received, 1, "sigusr1_received");
ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write");
/* wait for parent notification and exit */
@@ -93,7 +96,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
goto destroy_skel;
}
} else {
- pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1,
+ pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1 /* cpu */,
-1 /* group id */, 0 /* flags */);
if (!ASSERT_GE(pmu_fd, 0, "perf_event_open")) {
err = -1;
@@ -110,9 +113,9 @@ static void test_send_signal_common(struct perf_event_attr *attr,
ASSERT_EQ(read(pipe_c2p[0], buf, 1), 1, "pipe_read");
/* trigger the bpf send_signal */
- skel->bss->pid = pid;
- skel->bss->sig = SIGUSR1;
skel->bss->signal_thread = signal_thread;
+ skel->bss->sig = SIGUSR1;
+ skel->bss->pid = pid;
/* notify child that bpf program can send_signal now */
ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write");
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
new file mode 100644
index 000000000000..1932b1e0685c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "stacktrace_map_skip.skel.h"
+
+#define TEST_STACK_DEPTH 2
+
+void test_stacktrace_map_skip(void)
+{
+ struct stacktrace_map_skip *skel;
+ int stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+ int err, stack_trace_len;
+
+ skel = stacktrace_map_skip__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ /* find map fds */
+ stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
+ if (!ASSERT_GE(stackid_hmap_fd, 0, "stackid_hmap fd"))
+ goto out;
+
+ stackmap_fd = bpf_map__fd(skel->maps.stackmap);
+ if (!ASSERT_GE(stackmap_fd, 0, "stackmap fd"))
+ goto out;
+
+ stack_amap_fd = bpf_map__fd(skel->maps.stack_amap);
+ if (!ASSERT_GE(stack_amap_fd, 0, "stack_amap fd"))
+ goto out;
+
+ skel->bss->pid = getpid();
+
+ err = stacktrace_map_skip__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ /* give the bpf program some time to run */
+ sleep(1);
+
+ /* disable stack trace collection */
+ skel->bss->control = 1;
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+ * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (!ASSERT_OK(err, "compare_map_keys stackid_hmap vs. stackmap"))
+ goto out;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (!ASSERT_OK(err, "compare_map_keys stackmap vs. stackid_hmap"))
+ goto out;
+
+ stack_trace_len = TEST_STACK_DEPTH * sizeof(__u64);
+ err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
+ if (!ASSERT_OK(err, "compare_stack_ips stackmap vs. stack_amap"))
+ goto out;
+
+ if (!ASSERT_EQ(skel->bss->failed, 0, "skip_failed"))
+ goto out;
+
+out:
+ stacktrace_map_skip__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/subprogs.c b/tools/testing/selftests/bpf/prog_tests/subprogs.c
index 3f3d2ac4dd57..903f35a9e62e 100644
--- a/tools/testing/selftests/bpf/prog_tests/subprogs.c
+++ b/tools/testing/selftests/bpf/prog_tests/subprogs.c
@@ -1,32 +1,83 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
-#include <time.h>
#include "test_subprogs.skel.h"
#include "test_subprogs_unused.skel.h"
-static int duration;
+struct toggler_ctx {
+ int fd;
+ bool stop;
+};
-void test_subprogs(void)
+static void *toggle_jit_harden(void *arg)
+{
+ struct toggler_ctx *ctx = arg;
+ char two = '2';
+ char zero = '0';
+
+ while (!ctx->stop) {
+ lseek(ctx->fd, 0, SEEK_SET);
+ write(ctx->fd, &two, sizeof(two));
+ lseek(ctx->fd, 0, SEEK_SET);
+ write(ctx->fd, &zero, sizeof(zero));
+ }
+
+ return NULL;
+}
+
+static void test_subprogs_with_jit_harden_toggling(void)
+{
+ struct toggler_ctx ctx;
+ pthread_t toggler;
+ int err;
+ unsigned int i, loop = 10;
+
+ ctx.fd = open("/proc/sys/net/core/bpf_jit_harden", O_RDWR);
+ if (!ASSERT_GE(ctx.fd, 0, "open bpf_jit_harden"))
+ return;
+
+ ctx.stop = false;
+ err = pthread_create(&toggler, NULL, toggle_jit_harden, &ctx);
+ if (!ASSERT_OK(err, "new toggler"))
+ goto out;
+
+ /* give the toggler thread a chance to run */
+ usleep(1);
+
+ for (i = 0; i < loop; i++) {
+ struct test_subprogs *skel = test_subprogs__open_and_load();
+
+ if (!ASSERT_OK_PTR(skel, "skel open"))
+ break;
+ test_subprogs__destroy(skel);
+ }
+
+ ctx.stop = true;
+ pthread_join(toggler, NULL);
+out:
+ close(ctx.fd);
+}
+
+static void test_subprogs_alone(void)
{
struct test_subprogs *skel;
struct test_subprogs_unused *skel2;
int err;
skel = test_subprogs__open_and_load();
- if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
err = test_subprogs__attach(skel);
- if (CHECK(err, "skel_attach", "failed to attach skeleton: %d\n", err))
+ if (!ASSERT_OK(err, "skel attach"))
goto cleanup;
usleep(1);
- CHECK(skel->bss->res1 != 12, "res1", "got %d, exp %d\n", skel->bss->res1, 12);
- CHECK(skel->bss->res2 != 17, "res2", "got %d, exp %d\n", skel->bss->res2, 17);
- CHECK(skel->bss->res3 != 19, "res3", "got %d, exp %d\n", skel->bss->res3, 19);
- CHECK(skel->bss->res4 != 36, "res4", "got %d, exp %d\n", skel->bss->res4, 36);
+ ASSERT_EQ(skel->bss->res1, 12, "res1");
+ ASSERT_EQ(skel->bss->res2, 17, "res2");
+ ASSERT_EQ(skel->bss->res3, 19, "res3");
+ ASSERT_EQ(skel->bss->res4, 36, "res4");
skel2 = test_subprogs_unused__open_and_load();
ASSERT_OK_PTR(skel2, "unused_progs_skel");
@@ -35,3 +86,11 @@ void test_subprogs(void)
cleanup:
test_subprogs__destroy(skel);
}
+
+void test_subprogs(void)
+{
+ if (test__start_subtest("subprogs_alone"))
+ test_subprogs_alone();
+ if (test__start_subtest("subprogs_and_jit_harden"))
+ test_subprogs_with_jit_harden_toggling();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/subskeleton.c b/tools/testing/selftests/bpf/prog_tests/subskeleton.c
new file mode 100644
index 000000000000..9c31b7004f9c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/subskeleton.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "test_subskeleton.skel.h"
+#include "test_subskeleton_lib.subskel.h"
+
+static void subskeleton_lib_setup(struct bpf_object *obj)
+{
+ struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj);
+
+ if (!ASSERT_OK_PTR(lib, "open subskeleton"))
+ return;
+
+ *lib->rodata.var1 = 1;
+ *lib->data.var2 = 2;
+ lib->bss.var3->var3_1 = 3;
+ lib->bss.var3->var3_2 = 4;
+
+ test_subskeleton_lib__destroy(lib);
+}
+
+static int subskeleton_lib_subresult(struct bpf_object *obj)
+{
+ struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj);
+ int result;
+
+ if (!ASSERT_OK_PTR(lib, "open subskeleton"))
+ return -EINVAL;
+
+ result = *lib->bss.libout1;
+ ASSERT_EQ(result, 1 + 2 + 3 + 4 + 5 + 6, "lib subresult");
+
+ ASSERT_OK_PTR(lib->progs.lib_perf_handler, "lib_perf_handler");
+ ASSERT_STREQ(bpf_program__name(lib->progs.lib_perf_handler),
+ "lib_perf_handler", "program name");
+
+ ASSERT_OK_PTR(lib->maps.map1, "map1");
+ ASSERT_STREQ(bpf_map__name(lib->maps.map1), "map1", "map name");
+
+ ASSERT_EQ(*lib->data.var5, 5, "__weak var5");
+ ASSERT_EQ(*lib->data.var6, 6, "extern var6");
+ ASSERT_TRUE(*lib->kconfig.CONFIG_BPF_SYSCALL, "CONFIG_BPF_SYSCALL");
+
+ test_subskeleton_lib__destroy(lib);
+ return result;
+}
+
+void test_subskeleton(void)
+{
+ int err, result;
+ struct test_subskeleton *skel;
+
+ skel = test_subskeleton__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->rodata->rovar1 = 10;
+ skel->rodata->var1 = 1;
+ subskeleton_lib_setup(skel->obj);
+
+ err = test_subskeleton__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ err = test_subskeleton__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger tracepoint */
+ usleep(1);
+
+ result = subskeleton_lib_subresult(skel->obj) * 10;
+ ASSERT_EQ(skel->bss->out1, result, "unexpected calculation");
+
+cleanup:
+ test_subskeleton__destroy(skel);
+}
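
The generated header test_subskeleton_lib.subskel.h comes from the new bpftool gen subskeleton command (see the bpftool changes in the diffstat) and is not part of this hunk. Judging only from how it is used above, it can be expected to look roughly like the sketch below, where each field is a pointer into the parent bpf_object's maps, programs and data sections rather than an owned object; the exact types and layout here are assumptions.

/* illustrative sketch only -- not the actual generated header */
struct test_subskeleton_lib {
	struct bpf_object *obj;
	struct {
		struct bpf_map *map1;
	} maps;
	struct {
		struct bpf_program *lib_perf_handler;
	} progs;
	struct {
		int *var1;
	} rodata;
	struct {
		int *var2;
		int *var5;
		int *var6;
	} data;
	struct {
		struct {
			int var3_1;
			int var3_2;
		} *var3;
		int *libout1;
	} bss;
	struct {
		_Bool *CONFIG_BPF_SYSCALL;
	} kconfig;
};

struct test_subskeleton_lib *
test_subskeleton_lib__open(const struct bpf_object *src);

void test_subskeleton_lib__destroy(struct test_subskeleton_lib *skel);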
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
index 2b255e28ed26..7ad66a247c02 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
@@ -10,8 +10,6 @@
* to drop unexpected traffic.
*/
-#define _GNU_SOURCE
-
#include <arpa/inet.h>
#include <linux/if.h>
#include <linux/if_tun.h>
@@ -19,10 +17,8 @@
#include <linux/sysctl.h>
#include <linux/time_types.h>
#include <linux/net_tstamp.h>
-#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
-#include <sys/mount.h>
#include <sys/stat.h>
#include <unistd.h>
@@ -92,91 +88,6 @@ static int write_file(const char *path, const char *newval)
return 0;
}
-struct nstoken {
- int orig_netns_fd;
-};
-
-static int setns_by_fd(int nsfd)
-{
- int err;
-
- err = setns(nsfd, CLONE_NEWNET);
- close(nsfd);
-
- if (!ASSERT_OK(err, "setns"))
- return err;
-
- /* Switch /sys to the new namespace so that e.g. /sys/class/net
- * reflects the devices in the new namespace.
- */
- err = unshare(CLONE_NEWNS);
- if (!ASSERT_OK(err, "unshare"))
- return err;
-
- /* Make our /sys mount private, so the following umount won't
- * trigger the global umount in case it's shared.
- */
- err = mount("none", "/sys", NULL, MS_PRIVATE, NULL);
- if (!ASSERT_OK(err, "remount private /sys"))
- return err;
-
- err = umount2("/sys", MNT_DETACH);
- if (!ASSERT_OK(err, "umount2 /sys"))
- return err;
-
- err = mount("sysfs", "/sys", "sysfs", 0, NULL);
- if (!ASSERT_OK(err, "mount /sys"))
- return err;
-
- err = mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL);
- if (!ASSERT_OK(err, "mount /sys/fs/bpf"))
- return err;
-
- return 0;
-}
-
-/**
- * open_netns() - Switch to specified network namespace by name.
- *
- * Returns token with which to restore the original namespace
- * using close_netns().
- */
-static struct nstoken *open_netns(const char *name)
-{
- int nsfd;
- char nspath[PATH_MAX];
- int err;
- struct nstoken *token;
-
- token = calloc(1, sizeof(struct nstoken));
- if (!ASSERT_OK_PTR(token, "malloc token"))
- return NULL;
-
- token->orig_netns_fd = open("/proc/self/ns/net", O_RDONLY);
- if (!ASSERT_GE(token->orig_netns_fd, 0, "open /proc/self/ns/net"))
- goto fail;
-
- snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name);
- nsfd = open(nspath, O_RDONLY | O_CLOEXEC);
- if (!ASSERT_GE(nsfd, 0, "open netns fd"))
- goto fail;
-
- err = setns_by_fd(nsfd);
- if (!ASSERT_OK(err, "setns_by_fd"))
- goto fail;
-
- return token;
-fail:
- free(token);
- return NULL;
-}
-
-static void close_netns(struct nstoken *token)
-{
- ASSERT_OK(setns_by_fd(token->orig_netns_fd), "setns_by_fd");
- free(token);
-}
-
static int netns_setup_namespaces(const char *verb)
{
const char * const *ns = namespaces;
diff --git a/tools/testing/selftests/bpf/prog_tests/test_ima.c b/tools/testing/selftests/bpf/prog_tests/test_ima.c
index 97d8a6f84f4a..b13feceb38f1 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_ima.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_ima.c
@@ -13,14 +13,17 @@
#include "ima.skel.h"
-static int run_measured_process(const char *measured_dir, u32 *monitored_pid)
+#define MAX_SAMPLES 4
+
+static int _run_measured_process(const char *measured_dir, u32 *monitored_pid,
+ const char *cmd)
{
int child_pid, child_status;
child_pid = fork();
if (child_pid == 0) {
*monitored_pid = getpid();
- execlp("./ima_setup.sh", "./ima_setup.sh", "run", measured_dir,
+ execlp("./ima_setup.sh", "./ima_setup.sh", cmd, measured_dir,
NULL);
exit(errno);
@@ -32,19 +35,39 @@ static int run_measured_process(const char *measured_dir, u32 *monitored_pid)
return -EINVAL;
}
-static u64 ima_hash_from_bpf;
+static int run_measured_process(const char *measured_dir, u32 *monitored_pid)
+{
+ return _run_measured_process(measured_dir, monitored_pid, "run");
+}
+
+static u64 ima_hash_from_bpf[MAX_SAMPLES];
+static int ima_hash_from_bpf_idx;
static int process_sample(void *ctx, void *data, size_t len)
{
- ima_hash_from_bpf = *((u64 *)data);
+ if (ima_hash_from_bpf_idx >= MAX_SAMPLES)
+ return -ENOSPC;
+
+ ima_hash_from_bpf[ima_hash_from_bpf_idx++] = *((u64 *)data);
return 0;
}
+static void test_init(struct ima__bss *bss)
+{
+ ima_hash_from_bpf_idx = 0;
+
+ bss->use_ima_file_hash = false;
+ bss->enable_bprm_creds_for_exec = false;
+ bss->enable_kernel_read_file = false;
+ bss->test_deny = false;
+}
+
void test_test_ima(void)
{
char measured_dir_template[] = "/tmp/ima_measuredXXXXXX";
struct ring_buffer *ringbuf = NULL;
const char *measured_dir;
+ u64 bin_true_sample;
char cmd[256];
int err, duration = 0;
@@ -72,13 +95,127 @@ void test_test_ima(void)
if (CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno))
goto close_clean;
+ /*
+ * Test #1
+ * - Goal: obtain a sample with the bpf_ima_inode_hash() helper
+ * - Expected result: 1 sample (/bin/true)
+ */
+ test_init(skel->bss);
err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
- if (CHECK(err, "run_measured_process", "err = %d\n", err))
+ if (CHECK(err, "run_measured_process #1", "err = %d\n", err))
goto close_clean;
err = ring_buffer__consume(ringbuf);
ASSERT_EQ(err, 1, "num_samples_or_err");
- ASSERT_NEQ(ima_hash_from_bpf, 0, "ima_hash");
+ ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
+
+ /*
+ * Test #2
+ * - Goal: obtain samples with the bpf_ima_file_hash() helper
+ * - Expected result: 2 samples (./ima_setup.sh, /bin/true)
+ */
+ test_init(skel->bss);
+ skel->bss->use_ima_file_hash = true;
+ err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
+ if (CHECK(err, "run_measured_process #2", "err = %d\n", err))
+ goto close_clean;
+
+ err = ring_buffer__consume(ringbuf);
+ ASSERT_EQ(err, 2, "num_samples_or_err");
+ ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
+ ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash");
+ bin_true_sample = ima_hash_from_bpf[1];
+
+ /*
+ * Test #3
+ * - Goal: confirm that bpf_ima_inode_hash() returns a non-fresh digest
+ * - Expected result: 2 samples (/bin/true: non-fresh, fresh)
+ */
+ test_init(skel->bss);
+
+ err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
+ "modify-bin");
+ if (CHECK(err, "modify-bin #3", "err = %d\n", err))
+ goto close_clean;
+
+ skel->bss->enable_bprm_creds_for_exec = true;
+ err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
+ if (CHECK(err, "run_measured_process #3", "err = %d\n", err))
+ goto close_clean;
+
+ err = ring_buffer__consume(ringbuf);
+ ASSERT_EQ(err, 2, "num_samples_or_err");
+ ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
+ ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash");
+ ASSERT_EQ(ima_hash_from_bpf[0], bin_true_sample, "sample_equal_or_err");
+ /* IMA refreshed the digest. */
+ ASSERT_NEQ(ima_hash_from_bpf[1], bin_true_sample,
+ "sample_different_or_err");
+
+ /*
+ * Test #4
+ * - Goal: verify that bpf_ima_file_hash() returns a fresh digest
+ * - Expected result: 4 samples (./ima_setup.sh: fresh, fresh;
+ * /bin/true: fresh, fresh)
+ */
+ test_init(skel->bss);
+ skel->bss->use_ima_file_hash = true;
+ skel->bss->enable_bprm_creds_for_exec = true;
+ err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
+ if (CHECK(err, "run_measured_process #4", "err = %d\n", err))
+ goto close_clean;
+
+ err = ring_buffer__consume(ringbuf);
+ ASSERT_EQ(err, 4, "num_samples_or_err");
+ ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
+ ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash");
+ ASSERT_NEQ(ima_hash_from_bpf[2], 0, "ima_hash");
+ ASSERT_NEQ(ima_hash_from_bpf[3], 0, "ima_hash");
+ ASSERT_NEQ(ima_hash_from_bpf[2], bin_true_sample,
+ "sample_different_or_err");
+ ASSERT_EQ(ima_hash_from_bpf[3], ima_hash_from_bpf[2],
+ "sample_equal_or_err");
+
+ skel->bss->use_ima_file_hash = false;
+ skel->bss->enable_bprm_creds_for_exec = false;
+ err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
+ "restore-bin");
+ if (CHECK(err, "restore-bin #3", "err = %d\n", err))
+ goto close_clean;
+
+ /*
+ * Test #5
+ * - Goal: obtain a sample from the kernel_read_file hook
+ * - Expected result: 2 samples (./ima_setup.sh, policy_test)
+ */
+ test_init(skel->bss);
+ skel->bss->use_ima_file_hash = true;
+ skel->bss->enable_kernel_read_file = true;
+ err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
+ "load-policy");
+ if (CHECK(err, "run_measured_process #5", "err = %d\n", err))
+ goto close_clean;
+
+ err = ring_buffer__consume(ringbuf);
+ ASSERT_EQ(err, 2, "num_samples_or_err");
+ ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
+ ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash");
+
+ /*
+ * Test #6
+ * - Goal: ensure that the kernel_read_file hook denies an operation
+ * - Expected result: 0 samples
+ */
+ test_init(skel->bss);
+ skel->bss->enable_kernel_read_file = true;
+ skel->bss->test_deny = true;
+ err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
+ "load-policy");
+ if (CHECK(!err, "run_measured_process #6", "err = %d\n", err))
+ goto close_clean;
+
+ err = ring_buffer__consume(ringbuf);
+ ASSERT_EQ(err, 0, "num_samples_or_err");
close_clean:
snprintf(cmd, sizeof(cmd), "./ima_setup.sh cleanup %s", measured_dir);
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
new file mode 100644
index 000000000000..a50971c6cf4a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <net/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ipv6.h>
+#include <linux/in6.h>
+#include <linux/udp.h>
+#include <bpf/bpf_endian.h>
+#include "test_xdp_do_redirect.skel.h"
+
+#define SYS(fmt, ...) \
+ ({ \
+ char cmd[1024]; \
+ snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
+ if (!ASSERT_OK(system(cmd), cmd)) \
+ goto out; \
+ })
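+
+/* note: SYS() jumps to a local "out" label on failure, so it can only be
+ * used inside functions that define one (as test_xdp_do_redirect() below does)
+ */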
+
+struct udp_packet {
+ struct ethhdr eth;
+ struct ipv6hdr iph;
+ struct udphdr udp;
+ __u8 payload[64 - sizeof(struct udphdr)
+ - sizeof(struct ethhdr) - sizeof(struct ipv6hdr)];
+} __packed;
+
+static struct udp_packet pkt_udp = {
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
+ .eth.h_dest = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
+ .eth.h_source = {0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb},
+ .iph.version = 6,
+ .iph.nexthdr = IPPROTO_UDP,
+ .iph.payload_len = bpf_htons(sizeof(struct udp_packet)
+ - offsetof(struct udp_packet, udp)),
+ .iph.hop_limit = 2,
+ .iph.saddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(1)},
+ .iph.daddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(2)},
+ .udp.source = bpf_htons(1),
+ .udp.dest = bpf_htons(1),
+ .udp.len = bpf_htons(sizeof(struct udp_packet)
+ - offsetof(struct udp_packet, udp)),
+ .payload = {0x42}, /* receiver XDP program matches on this */
+};
+
+static int attach_tc_prog(struct bpf_tc_hook *hook, int fd)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, .prog_fd = fd);
+ int ret;
+
+ ret = bpf_tc_hook_create(hook);
+ if (!ASSERT_OK(ret, "create tc hook"))
+ return ret;
+
+ ret = bpf_tc_attach(hook, &opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+ bpf_tc_hook_destroy(hook);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* The maximum permissible size is: PAGE_SIZE - sizeof(struct xdp_page_head) -
+ * sizeof(struct skb_shared_info) - XDP_PACKET_HEADROOM = 3368 bytes
+ */
+#define MAX_PKT_SIZE 3368
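+/* A rough cross-check of that bound (the sizes here are assumptions for a
+ * typical x86-64 build, not values taken from this patch): with
+ * PAGE_SIZE = 4096, XDP_PACKET_HEADROOM = 256 and
+ * sizeof(struct skb_shared_info) = 320, the 3368-byte limit leaves
+ * 4096 - 256 - 320 - 3368 = 152 bytes for struct xdp_page_head.
+ */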
+static void test_max_pkt_size(int fd)
+{
+ char data[MAX_PKT_SIZE + 1] = {};
+ int err;
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &data,
+ .data_size_in = MAX_PKT_SIZE,
+ .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+ .repeat = 1,
+ );
+ err = bpf_prog_test_run_opts(fd, &opts);
+ ASSERT_OK(err, "prog_run_max_size");
+
+ opts.data_size_in += 1;
+ err = bpf_prog_test_run_opts(fd, &opts);
+ ASSERT_EQ(err, -EINVAL, "prog_run_too_big");
+}
+
+#define NUM_PKTS 10000
+void test_xdp_do_redirect(void)
+{
+ int err, xdp_prog_fd, tc_prog_fd, ifindex_src, ifindex_dst;
+ char data[sizeof(pkt_udp) + sizeof(__u32)];
+ struct test_xdp_do_redirect *skel = NULL;
+ struct nstoken *nstoken = NULL;
+ struct bpf_link *link;
+
+ struct xdp_md ctx_in = { .data = sizeof(__u32),
+ .data_end = sizeof(data) };
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &data,
+ .data_size_in = sizeof(data),
+ .ctx_in = &ctx_in,
+ .ctx_size_in = sizeof(ctx_in),
+ .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+ .repeat = NUM_PKTS,
+ .batch_size = 64,
+ );
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook,
+ .attach_point = BPF_TC_INGRESS);
+
+ memcpy(&data[sizeof(__u32)], &pkt_udp, sizeof(pkt_udp));
+ *((__u32 *)data) = 0x42; /* metadata test value */
+
+ skel = test_xdp_do_redirect__open();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ return;
+
+ /* The XDP program we run with bpf_prog_run() will cycle through all
+ * three xmit (PASS/TX/REDIRECT) return codes starting from above, and
+ * ending up with PASS, so we should end up with two packets on the dst
+ * iface and NUM_PKTS-2 in the TC hook. We match the packets on the UDP
+ * payload.
+ */
+ SYS("ip netns add testns");
+ nstoken = open_netns("testns");
+ if (!ASSERT_OK_PTR(nstoken, "setns"))
+ goto out;
+
+ SYS("ip link add veth_src type veth peer name veth_dst");
+ SYS("ip link set dev veth_src address 00:11:22:33:44:55");
+ SYS("ip link set dev veth_dst address 66:77:88:99:aa:bb");
+ SYS("ip link set dev veth_src up");
+ SYS("ip link set dev veth_dst up");
+ SYS("ip addr add dev veth_src fc00::1/64");
+ SYS("ip addr add dev veth_dst fc00::2/64");
+ SYS("ip neigh add fc00::2 dev veth_src lladdr 66:77:88:99:aa:bb");
+
+ /* We enable forwarding in the test namespace because that will cause
+ * the packets that go through the kernel stack (with XDP_PASS) to be
+ * forwarded back out the same interface (because of the packet dst
+ * combined with the interface addresses). When this happens, the
+ * regular forwarding path will end up going through the same
+ * veth_xdp_xmit() call as the XDP_REDIRECT code, which can cause a
+ * deadlock if it happens on the same CPU. There's a local_bh_disable()
+ * in the test_run code to prevent this, but an earlier version of the
+ * code didn't have this, so we keep the test behaviour to make sure the
+ * bug doesn't resurface.
+ */
+ SYS("sysctl -qw net.ipv6.conf.all.forwarding=1");
+
+ ifindex_src = if_nametoindex("veth_src");
+ ifindex_dst = if_nametoindex("veth_dst");
+ if (!ASSERT_NEQ(ifindex_src, 0, "ifindex_src") ||
+ !ASSERT_NEQ(ifindex_dst, 0, "ifindex_dst"))
+ goto out;
+
+ memcpy(skel->rodata->expect_dst, &pkt_udp.eth.h_dest, ETH_ALEN);
+ skel->rodata->ifindex_out = ifindex_src; /* redirect back to the same iface */
+ skel->rodata->ifindex_in = ifindex_src;
+ ctx_in.ingress_ifindex = ifindex_src;
+ tc_hook.ifindex = ifindex_src;
+
+ if (!ASSERT_OK(test_xdp_do_redirect__load(skel), "load"))
+ goto out;
+
+ link = bpf_program__attach_xdp(skel->progs.xdp_count_pkts, ifindex_dst);
+ if (!ASSERT_OK_PTR(link, "prog_attach"))
+ goto out;
+ skel->links.xdp_count_pkts = link;
+
+ tc_prog_fd = bpf_program__fd(skel->progs.tc_count_pkts);
+ if (attach_tc_prog(&tc_hook, tc_prog_fd))
+ goto out;
+
+ xdp_prog_fd = bpf_program__fd(skel->progs.xdp_redirect);
+ err = bpf_prog_test_run_opts(xdp_prog_fd, &opts);
+ if (!ASSERT_OK(err, "prog_run"))
+ goto out_tc;
+
+ /* wait for the packets to be flushed */
+ kern_sync_rcu();
+
+ /* There will be one packet sent through XDP_REDIRECT and one through
+ * XDP_TX; these will show up on the XDP counting program, while the
+ * rest will be counted at the TC ingress hook (and the counting program
+ * resets the packet payload so they don't get counted twice even though
+ * they are re-transmitted out the veth device).
+ */
+ ASSERT_EQ(skel->bss->pkts_seen_xdp, 2, "pkt_count_xdp");
+ ASSERT_EQ(skel->bss->pkts_seen_zero, 2, "pkt_count_zero");
+ ASSERT_EQ(skel->bss->pkts_seen_tc, NUM_PKTS - 2, "pkt_count_tc");
+
+ test_max_pkt_size(bpf_program__fd(skel->progs.xdp_count_pkts));
+
+out_tc:
+ bpf_tc_hook_destroy(&tc_hook);
+out:
+ if (nstoken)
+ close_netns(nstoken);
+ system("ip netns del testns");
+ test_xdp_do_redirect__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c
new file mode 100644
index 000000000000..8feddb8289cf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Google */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct bpf_testmod_btf_type_tag_1 {
+ int a;
+};
+
+struct bpf_testmod_btf_type_tag_2 {
+ struct bpf_testmod_btf_type_tag_1 *p;
+};
+
+__u64 g;
+
+SEC("fentry/bpf_testmod_test_btf_type_tag_percpu_1")
+int BPF_PROG(test_percpu1, struct bpf_testmod_btf_type_tag_1 *arg)
+{
+ g = arg->a;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_btf_type_tag_percpu_2")
+int BPF_PROG(test_percpu2, struct bpf_testmod_btf_type_tag_2 *arg)
+{
+ g = arg->p->a;
+ return 0;
+}
+
+/* trace_cgroup_mkdir(struct cgroup *cgrp, const char *path)
+ *
+ * struct cgroup_rstat_cpu {
+ * ...
+ * struct cgroup *updated_children;
+ * ...
+ * };
+ *
+ * struct cgroup {
+ * ...
+ * struct cgroup_rstat_cpu __percpu *rstat_cpu;
+ * ...
+ * };
+ */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_percpu_load, struct cgroup *cgrp, const char *path)
+{
+ g = (__u64)cgrp->rstat_cpu->updated_children;
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_percpu_helper, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup_rstat_cpu *rstat;
+ __u32 cpu;
+
+ cpu = bpf_get_smp_processor_id();
+ rstat = (struct cgroup_rstat_cpu *)bpf_per_cpu_ptr(cgrp->rstat_cpu, cpu);
+ if (rstat) {
+ /* READ_ONCE */
+ *(volatile int *)rstat;
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/ima.c b/tools/testing/selftests/bpf/progs/ima.c
index 96060ff4ffc6..e16a2c208481 100644
--- a/tools/testing/selftests/bpf/progs/ima.c
+++ b/tools/testing/selftests/bpf/progs/ima.c
@@ -18,8 +18,12 @@ struct {
char _license[] SEC("license") = "GPL";
-SEC("lsm.s/bprm_committed_creds")
-void BPF_PROG(ima, struct linux_binprm *bprm)
+bool use_ima_file_hash;
+bool enable_bprm_creds_for_exec;
+bool enable_kernel_read_file;
+bool test_deny;
+
+static void ima_test_common(struct file *file)
{
u64 ima_hash = 0;
u64 *sample;
@@ -28,8 +32,12 @@ void BPF_PROG(ima, struct linux_binprm *bprm)
pid = bpf_get_current_pid_tgid() >> 32;
if (pid == monitored_pid) {
- ret = bpf_ima_inode_hash(bprm->file->f_inode, &ima_hash,
- sizeof(ima_hash));
+ if (!use_ima_file_hash)
+ ret = bpf_ima_inode_hash(file->f_inode, &ima_hash,
+ sizeof(ima_hash));
+ else
+ ret = bpf_ima_file_hash(file, &ima_hash,
+ sizeof(ima_hash));
if (ret < 0 || ima_hash == 0)
return;
@@ -43,3 +51,53 @@ void BPF_PROG(ima, struct linux_binprm *bprm)
return;
}
+
+static int ima_test_deny(void)
+{
+ u32 pid;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid == monitored_pid && test_deny)
+ return -EPERM;
+
+ return 0;
+}
+
+SEC("lsm.s/bprm_committed_creds")
+void BPF_PROG(bprm_committed_creds, struct linux_binprm *bprm)
+{
+ ima_test_common(bprm->file);
+}
+
+SEC("lsm.s/bprm_creds_for_exec")
+int BPF_PROG(bprm_creds_for_exec, struct linux_binprm *bprm)
+{
+ if (!enable_bprm_creds_for_exec)
+ return 0;
+
+ ima_test_common(bprm->file);
+ return 0;
+}
+
+SEC("lsm.s/kernel_read_file")
+int BPF_PROG(kernel_read_file, struct file *file, enum kernel_read_file_id id,
+ bool contents)
+{
+ int ret;
+
+ if (!enable_kernel_read_file)
+ return 0;
+
+ if (!contents)
+ return 0;
+
+ if (id != READING_POLICY)
+ return 0;
+
+ ret = ima_test_deny();
+ if (ret < 0)
+ return ret;
+
+ ima_test_common(file);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi.c b/tools/testing/selftests/bpf/progs/kprobe_multi.c
new file mode 100644
index 000000000000..600be50800f8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+
+char _license[] SEC("license") = "GPL";
+
+extern const void bpf_fentry_test1 __ksym;
+extern const void bpf_fentry_test2 __ksym;
+extern const void bpf_fentry_test3 __ksym;
+extern const void bpf_fentry_test4 __ksym;
+extern const void bpf_fentry_test5 __ksym;
+extern const void bpf_fentry_test6 __ksym;
+extern const void bpf_fentry_test7 __ksym;
+extern const void bpf_fentry_test8 __ksym;
+
+int pid = 0;
+bool test_cookie = false;
+
+__u64 kprobe_test1_result = 0;
+__u64 kprobe_test2_result = 0;
+__u64 kprobe_test3_result = 0;
+__u64 kprobe_test4_result = 0;
+__u64 kprobe_test5_result = 0;
+__u64 kprobe_test6_result = 0;
+__u64 kprobe_test7_result = 0;
+__u64 kprobe_test8_result = 0;
+
+__u64 kretprobe_test1_result = 0;
+__u64 kretprobe_test2_result = 0;
+__u64 kretprobe_test3_result = 0;
+__u64 kretprobe_test4_result = 0;
+__u64 kretprobe_test5_result = 0;
+__u64 kretprobe_test6_result = 0;
+__u64 kretprobe_test7_result = 0;
+__u64 kretprobe_test8_result = 0;
+
+extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
+
+static void kprobe_multi_check(void *ctx, bool is_return)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return;
+
+ __u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0;
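+ /* With CONFIG_X86_KERNEL_IBT the traced function starts with a 4-byte
+ * endbr instruction, so adjust the reported ip back to the ksym address.
+ */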
+ __u64 addr = bpf_get_func_ip(ctx) - (CONFIG_X86_KERNEL_IBT ? 4 : 0);
+
+#define SET(__var, __addr, __cookie) ({ \
+ if (((const void *) addr == __addr) && \
+ (!test_cookie || (cookie == __cookie))) \
+ __var = 1; \
+})
+
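+ /* Entry probes expect cookie values 1..8 matching the test function
+ * index; return probes expect the reverse mapping, 8..1.
+ */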
+ if (is_return) {
+ SET(kretprobe_test1_result, &bpf_fentry_test1, 8);
+ SET(kretprobe_test2_result, &bpf_fentry_test2, 7);
+ SET(kretprobe_test3_result, &bpf_fentry_test3, 6);
+ SET(kretprobe_test4_result, &bpf_fentry_test4, 5);
+ SET(kretprobe_test5_result, &bpf_fentry_test5, 4);
+ SET(kretprobe_test6_result, &bpf_fentry_test6, 3);
+ SET(kretprobe_test7_result, &bpf_fentry_test7, 2);
+ SET(kretprobe_test8_result, &bpf_fentry_test8, 1);
+ } else {
+ SET(kprobe_test1_result, &bpf_fentry_test1, 1);
+ SET(kprobe_test2_result, &bpf_fentry_test2, 2);
+ SET(kprobe_test3_result, &bpf_fentry_test3, 3);
+ SET(kprobe_test4_result, &bpf_fentry_test4, 4);
+ SET(kprobe_test5_result, &bpf_fentry_test5, 5);
+ SET(kprobe_test6_result, &bpf_fentry_test6, 6);
+ SET(kprobe_test7_result, &bpf_fentry_test7, 7);
+ SET(kprobe_test8_result, &bpf_fentry_test8, 8);
+ }
+
+#undef SET
+}
+
+/*
+ * No tests in here, just to trigger 'bpf_fentry_test*'
+ * through tracing test_run
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
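+/*
+ * The section suffix is a glob matched against kallsyms: '?' matches a
+ * single character and '*' any run, so both patterns cover
+ * bpf_fentry_test1 through bpf_fentry_test8.
+ */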
+SEC("kprobe.multi/bpf_fentry_tes??")
+int test_kprobe(struct pt_regs *ctx)
+{
+ kprobe_multi_check(ctx, false);
+ return 0;
+}
+
+SEC("kretprobe.multi/bpf_fentry_test*")
+int test_kretprobe(struct pt_regs *ctx)
+{
+ kprobe_multi_check(ctx, true);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c
index 9b1f9b75d5c2..19423ed862e3 100644
--- a/tools/testing/selftests/bpf/progs/local_storage.c
+++ b/tools/testing/selftests/bpf/progs/local_storage.c
@@ -37,6 +37,13 @@ struct {
} sk_storage_map SEC(".maps");
struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
+ __type(key, int);
+ __type(value, struct local_storage);
+} sk_storage_map2 SEC(".maps");
+
+struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
@@ -115,7 +122,19 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
if (storage->value != DUMMY_STORAGE_VALUE)
sk_storage_result = -1;
+ /* This tests that we can associate multiple elements
+ * with the local storage.
+ */
+ storage = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 0;
+
err = bpf_sk_storage_delete(&sk_storage_map, sock->sk);
+ if (err)
+ return 0;
+
+ err = bpf_sk_storage_delete(&sk_storage_map2, sock->sk);
if (!err)
sk_storage_result = err;
diff --git a/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c b/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c
new file mode 100644
index 000000000000..2eb297df3dd6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+#define TEST_STACK_DEPTH 2
+#define TEST_MAX_ENTRIES 16384
+
+typedef __u64 stack_trace_t[TEST_STACK_DEPTH];
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(max_entries, TEST_MAX_ENTRIES);
+ __type(key, __u32);
+ __type(value, stack_trace_t);
+} stackmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, TEST_MAX_ENTRIES);
+ __type(key, __u32);
+ __type(value, __u32);
+} stackid_hmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, TEST_MAX_ENTRIES);
+ __type(key, __u32);
+ __type(value, stack_trace_t);
+} stack_amap SEC(".maps");
+
+int pid = 0;
+int control = 0;
+int failed = 0;
+
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct trace_event_raw_sched_switch *ctx)
+{
+ __u32 max_len = TEST_STACK_DEPTH * sizeof(__u64);
+ __u32 key = 0, val = 0;
+ __u64 *stack_p;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ if (control)
+ return 0;
+
+ /* it should be allowed to skip as many entries as the whole buffer holds */
+ key = bpf_get_stackid(ctx, &stackmap, TEST_STACK_DEPTH);
+ if ((int)key >= 0) {
+ /* The size of stackmap and stack_amap should be the same */
+ bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
+ stack_p = bpf_map_lookup_elem(&stack_amap, &key);
+ if (stack_p) {
+ bpf_get_stack(ctx, stack_p, max_len, TEST_STACK_DEPTH);
+ /* the kernel wrongly skipped all the entries and filled the buffer with zeros */
+ if (stack_p[0] == 0)
+ failed = 1;
+ }
+ } else {
+ /* old kernel doesn't support skipping that many entries */
+ failed = 2;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c b/tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c
new file mode 100644
index 000000000000..4061f701ca50
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+const volatile int my_pid;
+
+bool abc1_called;
+bool abc2_called;
+bool custom1_called;
+bool custom2_called;
+bool kprobe1_called;
+bool xyz_called;
+
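+/*
+ * Most of the section names below are unknown to libbpf; the test in
+ * prog_tests/custom_sec_handlers.c is expected to register custom section
+ * handlers for them before this object is loaded.
+ */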
+SEC("abc")
+int abc1(void *ctx)
+{
+ abc1_called = true;
+ return 0;
+}
+
+SEC("abc/whatever")
+int abc2(void *ctx)
+{
+ abc2_called = true;
+ return 0;
+}
+
+SEC("custom")
+int custom1(void *ctx)
+{
+ custom1_called = true;
+ return 0;
+}
+
+SEC("custom/something")
+int custom2(void *ctx)
+{
+ custom2_called = true;
+ return 0;
+}
+
+SEC("kprobe")
+int kprobe1(void *ctx)
+{
+ kprobe1_called = true;
+ return 0;
+}
+
+SEC("xyz/blah")
+int xyz(void *ctx)
+{
+ int whatever;
+
+ /* use sleepable helper, custom handler should set sleepable flag */
+ bpf_copy_from_user(&whatever, sizeof(whatever), NULL);
+ xyz_called = true;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c
index b4233d3efac2..92354cd72044 100644
--- a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c
@@ -10,7 +10,7 @@ static __always_inline int bpf_send_signal_test(void *ctx)
{
int ret;
- if (status != 0 || sig == 0 || pid == 0)
+ if (status != 0 || pid == 0)
return 0;
if ((bpf_get_current_pid_tgid() >> 32) == pid) {
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
index bf5b7caefdd0..6058dcb11b36 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
@@ -413,15 +413,20 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
/* Narrow loads from remote_port field. Expect SRC_PORT. */
if (LSB(ctx->remote_port, 0) != ((SRC_PORT >> 0) & 0xff) ||
- LSB(ctx->remote_port, 1) != ((SRC_PORT >> 8) & 0xff) ||
- LSB(ctx->remote_port, 2) != 0 || LSB(ctx->remote_port, 3) != 0)
+ LSB(ctx->remote_port, 1) != ((SRC_PORT >> 8) & 0xff))
return SK_DROP;
if (LSW(ctx->remote_port, 0) != SRC_PORT)
return SK_DROP;
- /* Load from remote_port field with zero padding (backward compatibility) */
+ /*
+ * NOTE: 4-byte load from bpf_sk_lookup at remote_port offset
+ * is quirky. It gets rewritten by the access converter to a
+ * 2-byte load for backward compatibility. Treating the load
+ * result as a be16 value makes the code portable across
+ * little- and big-endian platforms.
+ */
val_u32 = *(__u32 *)&ctx->remote_port;
- if (val_u32 != bpf_htonl(bpf_ntohs(SRC_PORT) << 16))
+ if (val_u32 != SRC_PORT)
return SK_DROP;
/* Narrow loads from local_port field. Expect DST_PORT. */
diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c
index 246f1f001813..9f4b8f9f1181 100644
--- a/tools/testing/selftests/bpf/progs/test_sock_fields.c
+++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c
@@ -114,7 +114,7 @@ static void tpcpy(struct bpf_tcp_sock *dst,
#define RET_LOG() ({ \
linum = __LINE__; \
- bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_NOEXIST); \
+ bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_ANY); \
return CG_OK; \
})
@@ -134,11 +134,11 @@ int egress_read_sock_fields(struct __sk_buff *skb)
if (!sk)
RET_LOG();
- /* Not the testing egress traffic or
- * TCP_LISTEN (10) socket will be copied at the ingress side.
+ /* Skip egress traffic that is not from our test, and the listening
+ * socket, which is covered by the cgroup_skb/ingress test program.
*/
if (sk->family != AF_INET6 || !is_loopback6(sk->src_ip6) ||
- sk->state == 10)
+ sk->state == BPF_TCP_LISTEN)
return CG_OK;
if (sk->src_port == bpf_ntohs(srv_sa6.sin6_port)) {
@@ -232,8 +232,8 @@ int ingress_read_sock_fields(struct __sk_buff *skb)
sk->src_port != bpf_ntohs(srv_sa6.sin6_port))
return CG_OK;
- /* Only interested in TCP_LISTEN */
- if (sk->state != 10)
+ /* Only interested in the listening socket */
+ if (sk->state != BPF_TCP_LISTEN)
return CG_OK;
/* It must be a fullsock for cgroup_skb/ingress prog */
@@ -251,10 +251,16 @@ int ingress_read_sock_fields(struct __sk_buff *skb)
return CG_OK;
}
+/*
+ * NOTE: 4-byte load from bpf_sock at dst_port offset is quirky. It
+ * gets rewritten by the access converter to a 2-byte load for
+ * backward compatibility. Treating the load result as a be16 value
+ * makes the code portable across little- and big-endian platforms.
+ */
static __noinline bool sk_dst_port__load_word(struct bpf_sock *sk)
{
__u32 *word = (__u32 *)&sk->dst_port;
- return word[0] == bpf_htonl(0xcafe0000);
+ return word[0] == bpf_htons(0xcafe);
}
static __noinline bool sk_dst_port__load_half(struct bpf_sock *sk)
@@ -281,6 +287,10 @@ int read_sk_dst_port(struct __sk_buff *skb)
if (!sk)
RET_LOG();
+ /* Ignore everything but the SYN from the client socket */
+ if (sk->state != BPF_TCP_SYN_SENT)
+ return CG_OK;
+
if (!sk_dst_port__load_word(sk))
RET_LOG();
if (!sk_dst_port__load_half(sk))
diff --git a/tools/testing/selftests/bpf/progs/test_subskeleton.c b/tools/testing/selftests/bpf/progs/test_subskeleton.c
new file mode 100644
index 000000000000..006417974372
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_subskeleton.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* volatile to force a read, compiler may assume 0 otherwise */
+const volatile int rovar1;
+int out1;
+
+/* Override weak symbol in test_subskeleton_lib */
+int var5 = 5;
+
+extern volatile bool CONFIG_BPF_SYSCALL __kconfig;
+
+extern int lib_routine(void);
+
+SEC("raw_tp/sys_enter")
+int handler1(const void *ctx)
+{
+ (void) CONFIG_BPF_SYSCALL;
+
+ out1 = lib_routine() * rovar1;
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_subskeleton_lib.c b/tools/testing/selftests/bpf/progs/test_subskeleton_lib.c
new file mode 100644
index 000000000000..ecfafe812c36
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_subskeleton_lib.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* volatile to force a read */
+const volatile int var1;
+volatile int var2 = 1;
+struct {
+ int var3_1;
+ __s64 var3_2;
+} var3;
+int libout1;
+
+extern volatile bool CONFIG_BPF_SYSCALL __kconfig;
+
+int var4[4];
+
+__weak int var5 SEC(".data");
+
+/* Extern and its definition are both fully contained within the library */
+extern int var6;
+
+int var7 SEC(".data.custom");
+
+int (*fn_ptr)(void);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 16);
+} map1 SEC(".maps");
+
+extern struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 16);
+} map2 SEC(".maps");
+
+int lib_routine(void)
+{
+ __u32 key = 1, value = 2;
+
+ (void) CONFIG_BPF_SYSCALL;
+ bpf_map_update_elem(&map2, &key, &value, BPF_ANY);
+
+ libout1 = var1 + var2 + var3.var3_1 + var3.var3_2 + var5 + var6;
+ return libout1;
+}
+
+SEC("perf_event")
+int lib_perf_handler(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c b/tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c
new file mode 100644
index 000000000000..80238486b7ce
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+int var6 = 6;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 16);
+} map2 SEC(".maps");
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tc_dtime.c b/tools/testing/selftests/bpf/progs/test_tc_dtime.c
index 9d9e8e17b8a0..06f300d06dbd 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_dtime.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_dtime.c
@@ -174,13 +174,13 @@ int egress_host(struct __sk_buff *skb)
return TC_ACT_OK;
if (skb_proto(skb_type) == IPPROTO_TCP) {
- if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_MONO &&
+ if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO &&
skb->tstamp)
inc_dtimes(EGRESS_ENDHOST);
else
inc_errs(EGRESS_ENDHOST);
} else {
- if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_UNSPEC &&
+ if (skb->tstamp_type == BPF_SKB_TSTAMP_UNSPEC &&
skb->tstamp)
inc_dtimes(EGRESS_ENDHOST);
else
@@ -204,7 +204,7 @@ int ingress_host(struct __sk_buff *skb)
if (!skb_type)
return TC_ACT_OK;
- if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_MONO &&
+ if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO &&
skb->tstamp == EGRESS_FWDNS_MAGIC)
inc_dtimes(INGRESS_ENDHOST);
else
@@ -226,7 +226,7 @@ int ingress_fwdns_prio100(struct __sk_buff *skb)
return TC_ACT_OK;
/* delivery_time is only available to the ingress
- * if the tc-bpf checks the skb->delivery_time_type.
+ * if the tc-bpf checks the skb->tstamp_type.
*/
if (skb->tstamp == EGRESS_ENDHOST_MAGIC)
inc_errs(INGRESS_FWDNS_P100);
@@ -250,7 +250,7 @@ int egress_fwdns_prio100(struct __sk_buff *skb)
return TC_ACT_OK;
/* delivery_time is always available to egress even
- * the tc-bpf did not use the delivery_time_type.
+ * if the tc-bpf did not use the tstamp_type.
*/
if (skb->tstamp == INGRESS_FWDNS_MAGIC)
inc_dtimes(EGRESS_FWDNS_P100);
@@ -278,9 +278,9 @@ int ingress_fwdns_prio101(struct __sk_buff *skb)
if (skb_proto(skb_type) == IPPROTO_UDP)
expected_dtime = 0;
- if (skb->delivery_time_type) {
+ if (skb->tstamp_type) {
if (fwdns_clear_dtime() ||
- skb->delivery_time_type != BPF_SKB_DELIVERY_TIME_MONO ||
+ skb->tstamp_type != BPF_SKB_TSTAMP_DELIVERY_MONO ||
skb->tstamp != expected_dtime)
inc_errs(INGRESS_FWDNS_P101);
else
@@ -290,14 +290,14 @@ int ingress_fwdns_prio101(struct __sk_buff *skb)
inc_errs(INGRESS_FWDNS_P101);
}
- if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_MONO) {
+ if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO) {
skb->tstamp = INGRESS_FWDNS_MAGIC;
} else {
- if (bpf_skb_set_delivery_time(skb, INGRESS_FWDNS_MAGIC,
- BPF_SKB_DELIVERY_TIME_MONO))
+ if (bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
+ BPF_SKB_TSTAMP_DELIVERY_MONO))
inc_errs(SET_DTIME);
- if (!bpf_skb_set_delivery_time(skb, INGRESS_FWDNS_MAGIC,
- BPF_SKB_DELIVERY_TIME_UNSPEC))
+ if (!bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
+ BPF_SKB_TSTAMP_UNSPEC))
inc_errs(SET_DTIME);
}
@@ -320,9 +320,9 @@ int egress_fwdns_prio101(struct __sk_buff *skb)
/* Should have handled in prio100 */
return TC_ACT_SHOT;
- if (skb->delivery_time_type) {
+ if (skb->tstamp_type) {
if (fwdns_clear_dtime() ||
- skb->delivery_time_type != BPF_SKB_DELIVERY_TIME_MONO ||
+ skb->tstamp_type != BPF_SKB_TSTAMP_DELIVERY_MONO ||
skb->tstamp != INGRESS_FWDNS_MAGIC)
inc_errs(EGRESS_FWDNS_P101);
else
@@ -332,14 +332,14 @@ int egress_fwdns_prio101(struct __sk_buff *skb)
inc_errs(EGRESS_FWDNS_P101);
}
- if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_MONO) {
+ if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO) {
skb->tstamp = EGRESS_FWDNS_MAGIC;
} else {
- if (bpf_skb_set_delivery_time(skb, EGRESS_FWDNS_MAGIC,
- BPF_SKB_DELIVERY_TIME_MONO))
+ if (bpf_skb_set_tstamp(skb, EGRESS_FWDNS_MAGIC,
+ BPF_SKB_TSTAMP_DELIVERY_MONO))
inc_errs(SET_DTIME);
- if (!bpf_skb_set_delivery_time(skb, EGRESS_FWDNS_MAGIC,
- BPF_SKB_DELIVERY_TIME_UNSPEC))
+ if (!bpf_skb_set_tstamp(skb, EGRESS_FWDNS_MAGIC,
+ BPF_SKB_TSTAMP_UNSPEC))
inc_errs(SET_DTIME);
}
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c
new file mode 100644
index 000000000000..77a123071940
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+#define ETH_ALEN 6
+#define HDR_SZ (sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + sizeof(struct udphdr))
+const volatile int ifindex_out;
+const volatile int ifindex_in;
+const volatile __u8 expect_dst[ETH_ALEN];
+volatile int pkts_seen_xdp = 0;
+volatile int pkts_seen_zero = 0;
+volatile int pkts_seen_tc = 0;
+volatile int retcode = XDP_REDIRECT;
+
+SEC("xdp")
+int xdp_redirect(struct xdp_md *xdp)
+{
+ __u32 *metadata = (void *)(long)xdp->data_meta;
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+
+ __u8 *payload = data + HDR_SZ;
+ int ret = retcode;
+
+ if (payload + 1 > data_end)
+ return XDP_ABORTED;
+
+ if (xdp->ingress_ifindex != ifindex_in)
+ return XDP_ABORTED;
+
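+ /* The metadata area sits immediately before the packet data; check that
+ * the 4-byte marker written by the test setup is present before reading it.
+ */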
+ if (metadata + 1 > data)
+ return XDP_ABORTED;
+
+ if (*metadata != 0x42)
+ return XDP_ABORTED;
+
+ if (*payload == 0) {
+ *payload = 0x42;
+ pkts_seen_zero++;
+ }
+
+ if (bpf_xdp_adjust_meta(xdp, 4))
+ return XDP_ABORTED;
+
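+ /* Count retcode down from XDP_REDIRECT towards XDP_PASS so repeated test
+ * runs exercise the different verdicts; ret keeps this run's value.
+ */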
+ if (retcode > XDP_PASS)
+ retcode--;
+
+ if (ret == XDP_REDIRECT)
+ return bpf_redirect(ifindex_out, 0);
+
+ return ret;
+}
+
+static bool check_pkt(void *data, void *data_end)
+{
+ struct ipv6hdr *iph = data + sizeof(struct ethhdr);
+ __u8 *payload = data + HDR_SZ;
+
+ if (payload + 1 > data_end)
+ return false;
+
+ if (iph->nexthdr != IPPROTO_UDP || *payload != 0x42)
+ return false;
+
+ /* reset the payload so the same packet doesn't get counted twice when
+ * it cycles back through the kernel path and out the dst veth
+ */
+ *payload = 0;
+ return true;
+}
+
+SEC("xdp")
+int xdp_count_pkts(struct xdp_md *xdp)
+{
+ void *data = (void *)(long)xdp->data;
+ void *data_end = (void *)(long)xdp->data_end;
+
+ if (check_pkt(data, data_end))
+ pkts_seen_xdp++;
+
+ /* Return XDP_DROP to make sure the data page is recycled, like when it
+ * exits a physical NIC. Recycled pages will be counted in the
+ * pkts_seen_zero counter above.
+ */
+ return XDP_DROP;
+}
+
+SEC("tc")
+int tc_count_pkts(struct __sk_buff *skb)
+{
+ void *data = (void *)(long)skb->data;
+ void *data_end = (void *)(long)skb->data_end;
+
+ if (check_pkt(data, data_end))
+ pkts_seen_tc++;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index 5b8314cd77fd..d6a1be4d8020 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -36,7 +36,7 @@ int main(int argc, char **argv)
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
};
- size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+ size_t insns_cnt = ARRAY_SIZE(prog);
int error = EXIT_FAILURE;
int map_fd, percpu_map_fd, prog_fd, cgroup_fd;
struct bpf_cgroup_storage_key key;
diff --git a/tools/testing/selftests/bpf/test_lirc_mode2.sh b/tools/testing/selftests/bpf/test_lirc_mode2.sh
index ec4e15948e40..5252b91f48a1 100755
--- a/tools/testing/selftests/bpf/test_lirc_mode2.sh
+++ b/tools/testing/selftests/bpf/test_lirc_mode2.sh
@@ -3,6 +3,7 @@
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
+ret=$ksft_skip
msg="skip all tests:"
if [ $UID != 0 ]; then
@@ -25,7 +26,7 @@ do
fi
done
-if [ -n $LIRCDEV ];
+if [ -n "$LIRCDEV" ];
then
TYPE=lirc_mode2
./test_lirc_mode2_user $LIRCDEV $INPUTDEV
@@ -36,3 +37,5 @@ then
echo -e ${GREEN}"PASS: $TYPE"${NC}
fi
fi
+
+exit $ret
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index 6e6235185a86..563bbe18c172 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -878,11 +878,11 @@ int main(int argc, char **argv)
assert(nr_cpus != -1);
printf("nr_cpus:%d\n\n", nr_cpus);
- for (f = 0; f < sizeof(map_flags) / sizeof(*map_flags); f++) {
+ for (f = 0; f < ARRAY_SIZE(map_flags); f++) {
unsigned int tgt_free = (map_flags[f] & BPF_F_NO_COMMON_LRU) ?
PERCPU_FREE_TARGET : LOCAL_FREE_TARGET;
- for (t = 0; t < sizeof(map_types) / sizeof(*map_types); t++) {
+ for (t = 0; t < ARRAY_SIZE(map_types); t++) {
test_lru_sanity0(map_types[t], map_flags[f]);
test_lru_sanity1(map_types[t], map_flags[f], tgt_free);
test_lru_sanity2(map_types[t], map_flags[f], tgt_free);
diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
index b497bb85b667..6c69c42b1d60 100755
--- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
+++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
@@ -120,6 +120,14 @@ setup()
ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0
ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0
+ # disable IPv6 DAD because it sometimes takes too long and fails tests
+ ip netns exec ${NS1} sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec ${NS3} sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec ${NS1} sysctl -wq net.ipv6.conf.default.accept_dad=0
+ ip netns exec ${NS2} sysctl -wq net.ipv6.conf.default.accept_dad=0
+ ip netns exec ${NS3} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
ip link add veth1 type veth peer name veth2
ip link add veth3 type veth peer name veth4
ip link add veth5 type veth peer name veth6
@@ -289,7 +297,7 @@ test_ping()
ip netns exec ${NS1} ping -c 1 -W 1 -I veth1 ${IPv4_DST} 2>&1 > /dev/null
RET=$?
elif [ "${PROTO}" == "IPv6" ] ; then
- ip netns exec ${NS1} ping6 -c 1 -W 6 -I veth1 ${IPv6_DST} 2>&1 > /dev/null
+ ip netns exec ${NS1} ping6 -c 1 -W 1 -I veth1 ${IPv6_DST} 2>&1 > /dev/null
RET=$?
else
echo " test_ping: unknown PROTO: ${PROTO}"
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index f0c8d05ba6d1..f3d5d7ac6505 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -723,7 +723,7 @@ static int xmsg_ret_only_prog_load(const struct sock_addr_test *test,
BPF_MOV64_IMM(BPF_REG_0, rc),
BPF_EXIT_INSN(),
};
- return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
+ return load_insns(test, insns, ARRAY_SIZE(insns));
}
static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
@@ -795,7 +795,7 @@ static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
BPF_EXIT_INSN(),
};
- return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
+ return load_insns(test, insns, ARRAY_SIZE(insns));
}
static int recvmsg4_rw_c_prog_load(const struct sock_addr_test *test)
@@ -858,7 +858,7 @@ static int sendmsg6_rw_dst_asm_prog_load(const struct sock_addr_test *test,
BPF_EXIT_INSN(),
};
- return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
+ return load_insns(test, insns, ARRAY_SIZE(insns));
}
static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test)
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 1ba7e7346afb..dfb4f5c0fcb9 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -1786,7 +1786,7 @@ static int populate_progs(char *bpf_file)
i++;
}
- for (i = 0; i < sizeof(map_fd)/sizeof(int); i++) {
+ for (i = 0; i < ARRAY_SIZE(map_fd); i++) {
maps[i] = bpf_object__find_map_by_name(obj, map_names[i]);
map_fd[i] = bpf_map__fd(maps[i]);
if (map_fd[i] < 0) {
@@ -1867,7 +1867,7 @@ static int __test_selftests(int cg_fd, struct sockmap_options *opt)
}
/* Tests basic commands and APIs */
- for (i = 0; i < sizeof(test)/sizeof(struct _test); i++) {
+ for (i = 0; i < ARRAY_SIZE(test); i++) {
struct _test t = test[i];
if (check_whitelist(&t, opt) != 0)
diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh
index ca1372924023..2817d9948d59 100755
--- a/tools/testing/selftests/bpf/test_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tunnel.sh
@@ -39,7 +39,7 @@
# from root namespace, the following operations happen:
# 1) Route lookup shows 10.1.1.100/24 belongs to tnl dev, fwd to tnl dev.
# 2) Tnl device's egress BPF program is triggered and set the tunnel metadata,
-# with remote_ip=172.16.1.200 and others.
+# with remote_ip=172.16.1.100 and others.
# 3) Outer tunnel header is prepended and route the packet to veth1's egress
# 4) veth0's ingress queue receive the tunneled packet at namespace at_ns0
# 5) Tunnel protocol handler, ex: vxlan_rcv, decap the packet
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 92e3465fbae8..a2cd236c32eb 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -22,8 +22,6 @@
#include <limits.h>
#include <assert.h>
-#include <sys/capability.h>
-
#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
@@ -42,6 +40,7 @@
# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
+#include "cap_helpers.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "test_btf.h"
@@ -62,6 +61,10 @@
#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
+/* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
+#define ADMIN_CAPS (1ULL << CAP_NET_ADMIN | \
+ 1ULL << CAP_PERFMON | \
+ 1ULL << CAP_BPF)
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
static int skips;
@@ -973,47 +976,19 @@ struct libcap {
static int set_admin(bool admin)
{
- cap_t caps;
- /* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
- const cap_value_t cap_net_admin = CAP_NET_ADMIN;
- const cap_value_t cap_sys_admin = CAP_SYS_ADMIN;
- struct libcap *cap;
- int ret = -1;
-
- caps = cap_get_proc();
- if (!caps) {
- perror("cap_get_proc");
- return -1;
- }
- cap = (struct libcap *)caps;
- if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_sys_admin, CAP_CLEAR)) {
- perror("cap_set_flag clear admin");
- goto out;
- }
- if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_admin,
- admin ? CAP_SET : CAP_CLEAR)) {
- perror("cap_set_flag set_or_clear net");
- goto out;
- }
- /* libcap is likely old and simply ignores CAP_BPF and CAP_PERFMON,
- * so update effective bits manually
- */
+ int err;
+
if (admin) {
- cap->data[1].effective |= 1 << (38 /* CAP_PERFMON */ - 32);
- cap->data[1].effective |= 1 << (39 /* CAP_BPF */ - 32);
+ err = cap_enable_effective(ADMIN_CAPS, NULL);
+ if (err)
+ perror("cap_enable_effective(ADMIN_CAPS)");
} else {
- cap->data[1].effective &= ~(1 << (38 - 32));
- cap->data[1].effective &= ~(1 << (39 - 32));
- }
- if (cap_set_proc(caps)) {
- perror("cap_set_proc");
- goto out;
+ err = cap_disable_effective(ADMIN_CAPS, NULL);
+ if (err)
+ perror("cap_disable_effective(ADMIN_CAPS)");
}
- ret = 0;
-out:
- if (cap_free(caps))
- perror("cap_free");
- return ret;
+
+ return err;
}
static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
@@ -1291,31 +1266,18 @@ fail_log:
static bool is_admin(void)
{
- cap_flag_value_t net_priv = CAP_CLEAR;
- bool perfmon_priv = false;
- bool bpf_priv = false;
- struct libcap *cap;
- cap_t caps;
-
-#ifdef CAP_IS_SUPPORTED
- if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
- perror("cap_get_flag");
- return false;
- }
-#endif
- caps = cap_get_proc();
- if (!caps) {
- perror("cap_get_proc");
+ __u64 caps;
+
+ /* The test checks for the finer-grained CAP_NET_ADMIN, CAP_PERFMON,
+ * and CAP_BPF capabilities instead of CAP_SYS_ADMIN.
+ * Thus, disable CAP_SYS_ADMIN at the beginning.
+ */
+ if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps)) {
+ perror("cap_disable_effective(CAP_SYS_ADMIN)");
return false;
}
- cap = (struct libcap *)caps;
- bpf_priv = cap->data[1].effective & (1 << (39/* CAP_BPF */ - 32));
- perfmon_priv = cap->data[1].effective & (1 << (38/* CAP_PERFMON */ - 32));
- if (cap_get_flag(caps, CAP_NET_ADMIN, CAP_EFFECTIVE, &net_priv))
- perror("cap_get_flag NET");
- if (cap_free(caps))
- perror("cap_free");
- return bpf_priv && perfmon_priv && net_priv == CAP_SET;
+
+ return (caps & ADMIN_CAPS) == ADMIN_CAPS;
}
static void get_unpriv_disabled()
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index ca6abae9b09c..3d6217e3aff7 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -34,6 +34,13 @@ int load_kallsyms(void)
if (!f)
return -ENOENT;
+ /*
+ * This is called/used from multiple places,
+ * load symbols just once.
+ */
+ if (sym_cnt) {
+ fclose(f);
+ return 0;
+ }
+
while (fgets(buf, sizeof(buf), f)) {
if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
break;
diff --git a/tools/testing/selftests/bpf/verifier/bounds_deduction.c b/tools/testing/selftests/bpf/verifier/bounds_deduction.c
index 91869aea6d64..3931c481e30c 100644
--- a/tools/testing/selftests/bpf/verifier/bounds_deduction.c
+++ b/tools/testing/selftests/bpf/verifier/bounds_deduction.c
@@ -105,7 +105,7 @@
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 has pointer with unsupported alu operation",
- .errstr = "dereference of modified ctx ptr",
+ .errstr = "negative offset ctx ptr R1 off=-1 disallowed",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index f890333259ad..2e03decb11b6 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -116,6 +116,89 @@
},
},
{
+ "calls: invalid kfunc call: reg->off must be zero when passed to release kfunc",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "R1 must have zero offset when passed to release func",
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_acquire", 3 },
+ { "bpf_kfunc_call_memb_release", 8 },
+ },
+},
+{
+ "calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 16),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_acquire", 3 },
+ { "bpf_kfunc_call_test_release", 9 },
+ },
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr = "negative offset ptr_ ptr R1 off=-4 disallowed",
+},
+{
+ "calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_acquire", 3 },
+ { "bpf_kfunc_call_test_release", 9 },
+ { "bpf_kfunc_call_test_release", 13 },
+ { "bpf_kfunc_call_test_release", 17 },
+ },
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr = "variable ptr_ access var_off=(0x0; 0x7) disallowed",
+},
+{
"calls: basic sanity",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c
index 60f6fbe03f19..c8eaf0536c24 100644
--- a/tools/testing/selftests/bpf/verifier/ctx.c
+++ b/tools/testing/selftests/bpf/verifier/ctx.c
@@ -58,7 +58,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "dereference of modified ctx ptr",
+ .errstr = "negative offset ctx ptr R1 off=-612 disallowed",
},
{
"pass modified ctx pointer to helper, 2",
@@ -71,8 +71,8 @@
},
.result_unpriv = REJECT,
.result = REJECT,
- .errstr_unpriv = "dereference of modified ctx ptr",
- .errstr = "dereference of modified ctx ptr",
+ .errstr_unpriv = "negative offset ctx ptr R1 off=-612 disallowed",
+ .errstr = "negative offset ctx ptr R1 off=-612 disallowed",
},
{
"pass modified ctx pointer to helper, 3",
@@ -141,7 +141,7 @@
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG,
.result = REJECT,
- .errstr = "dereference of modified ctx ptr",
+ .errstr = "negative offset ctx ptr R1 off=-612 disallowed",
},
{
"pass ctx or null check, 5: null (connect)",