Diffstat (limited to 'tools')
-rw-r--r--  tools/bpf/bpftool/Makefile | 2
-rw-r--r--  tools/bpf/bpftool/gen.c | 394
-rw-r--r--  tools/bpf/bpftool/main.c | 7
-rw-r--r--  tools/bpf/bpftool/main.h | 1
-rw-r--r--  tools/bpf/bpftool/prog.c | 107
-rw-r--r--  tools/bpf/bpftool/xlated_dumper.c | 3
-rw-r--r--  tools/include/uapi/linux/bpf.h | 39
-rw-r--r--  tools/lib/bpf/Build | 2
-rw-r--r--  tools/lib/bpf/bpf_gen_internal.h | 41
-rw-r--r--  tools/lib/bpf/gen_loader.c | 729
-rw-r--r--  tools/lib/bpf/libbpf.c | 427
-rw-r--r--  tools/lib/bpf/libbpf.h | 67
-rw-r--r--  tools/lib/bpf/libbpf.map | 7
-rw-r--r--  tools/lib/bpf/libbpf_internal.h | 2
-rw-r--r--  tools/lib/bpf/linker.c | 18
-rw-r--r--  tools/lib/bpf/netlink.c | 568
-rw-r--r--  tools/lib/bpf/nlattr.h | 48
-rw-r--r--  tools/lib/bpf/skel_internal.h | 123
-rw-r--r--  tools/testing/selftests/bpf/.gitignore | 1
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 16
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/atomics.c | 72
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fentry_fexit.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fentry_test.c | 10
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fexit_sleep.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fexit_test.c | 10
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kfunc_call.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/ksyms_module.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/ringbuf.c | 8
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/send_signal.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/skeleton.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/static_linked.c | 9
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/syscall.c | 55
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tc_bpf.c | 395
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/trace_printk.c | 5
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/kfree_skb.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/linked_maps1.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/syscall.c | 121
-rw-r--r--  tools/testing/selftests/bpf/progs/tailcall3.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/tailcall4.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/tailcall5.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_check_mtu.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_cls_redirect.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_global_func_args.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_rdonly_maps.c | 6
-rw-r--r--  tools/testing/selftests/bpf/progs/test_ringbuf.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_skeleton.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_snprintf_single.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sockmap_listen.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_static_linked1.c | 10
-rw-r--r--  tools/testing/selftests/bpf/progs/test_static_linked2.c | 10
-rw-r--r--  tools/testing/selftests/bpf/progs/test_subprogs.c | 13
-rw-r--r--  tools/testing/selftests/bpf/progs/test_tc_bpf.c | 12
-rw-r--r--  tools/testing/selftests/bpf/progs/trace_printk.c | 6
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/port_scale.sh | 4
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh | 69
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh | 14
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh | 24
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh | 12
-rwxr-xr-x  tools/testing/selftests/net/devlink_port_split.py | 8
-rwxr-xr-x  tools/testing/selftests/net/forwarding/custom_multipath_hash.sh | 364
-rwxr-xr-x  tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh | 456
-rwxr-xr-x  tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh | 458
-rwxr-xr-x  tools/testing/selftests/net/unicast_extensions.sh | 17
66 files changed, 4452 insertions(+), 390 deletions(-)
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index b3073ae84018..d16d289ade7a 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -136,7 +136,7 @@ endif
BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool
-BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o)
+BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o xlated_dumper.o btf_dumper.o) $(OUTPUT)disasm.o
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
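(The bootstrap bpftool grows xlated_dumper.o, btf_dumper.o and disasm.o here because the new --use-loader path in prog.c, further down, disassembles the generated loader program when verifier logs are requested.)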
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 31ade77f5ef8..13b0aa789178 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -18,6 +18,7 @@
#include <sys/stat.h>
#include <sys/mman.h>
#include <bpf/btf.h>
+#include <bpf/bpf_gen_internal.h>
#include "json_writer.h"
#include "main.h"
@@ -106,8 +107,10 @@ static int codegen_datasec_def(struct bpf_object *obj,
if (strcmp(sec_name, ".data") == 0) {
sec_ident = "data";
+ strip_mods = true;
} else if (strcmp(sec_name, ".bss") == 0) {
sec_ident = "bss";
+ strip_mods = true;
} else if (strcmp(sec_name, ".rodata") == 0) {
sec_ident = "rodata";
strip_mods = true;
@@ -129,6 +132,10 @@ static int codegen_datasec_def(struct bpf_object *obj,
int need_off = sec_var->offset, align_off, align;
__u32 var_type_id = var->type;
+ /* static variables are not exposed through BPF skeleton */
+ if (btf_var(var)->linkage == BTF_VAR_STATIC)
+ continue;
+
if (off > need_off) {
p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
sec_name, i, need_off, off);
@@ -268,6 +275,327 @@ static void codegen(const char *template, ...)
free(s);
}
+static void print_hex(const char *data, int data_sz)
+{
+ int i, len;
+
+ for (i = 0, len = 0; i < data_sz; i++) {
+ int w = data[i] ? 4 : 2;
+
+ len += w;
+ if (len > 78) {
+ printf("\\\n");
+ len = w;
+ }
+ if (!data[i])
+ printf("\\0");
+ else
+ printf("\\x%02x", (unsigned char)data[i]);
+ }
+}
+
+static size_t bpf_map_mmap_sz(const struct bpf_map *map)
+{
+ long page_sz = sysconf(_SC_PAGE_SIZE);
+ size_t map_sz;
+
+ map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
+ map_sz = roundup(map_sz, page_sz);
+ return map_sz;
+}
+
+static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
+{
+ struct bpf_program *prog;
+
+ bpf_object__for_each_program(prog, obj) {
+ const char *tp_name;
+
+ codegen("\
+ \n\
+ \n\
+ static inline int \n\
+ %1$s__%2$s__attach(struct %1$s *skel) \n\
+ { \n\
+ int prog_fd = skel->progs.%2$s.prog_fd; \n\
+ ", obj_name, bpf_program__name(prog));
+
+ switch (bpf_program__get_type(prog)) {
+ case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
+ printf("\tint fd = bpf_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
+ break;
+ case BPF_PROG_TYPE_TRACING:
+ printf("\tint fd = bpf_raw_tracepoint_open(NULL, prog_fd);\n");
+ break;
+ default:
+ printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
+ break;
+ }
+ codegen("\
+ \n\
+ \n\
+ if (fd > 0) \n\
+ skel->links.%1$s_fd = fd; \n\
+ return fd; \n\
+ } \n\
+ ", bpf_program__name(prog));
+ }
+
+ codegen("\
+ \n\
+ \n\
+ static inline int \n\
+ %1$s__attach(struct %1$s *skel) \n\
+ { \n\
+ int ret = 0; \n\
+ \n\
+ ", obj_name);
+
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\
+ ", obj_name, bpf_program__name(prog));
+ }
+
+ codegen("\
+ \n\
+ return ret < 0 ? ret : 0; \n\
+ } \n\
+ \n\
+ static inline void \n\
+ %1$s__detach(struct %1$s *skel) \n\
+ { \n\
+ ", obj_name);
+
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ skel_closenz(skel->links.%1$s_fd); \n\
+ ", bpf_program__name(prog));
+ }
+
+ codegen("\
+ \n\
+ } \n\
+ ");
+}
+
+static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
+{
+ struct bpf_program *prog;
+ struct bpf_map *map;
+
+ codegen("\
+ \n\
+ static void \n\
+ %1$s__destroy(struct %1$s *skel) \n\
+ { \n\
+ if (!skel) \n\
+ return; \n\
+ %1$s__detach(skel); \n\
+ ",
+ obj_name);
+
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ skel_closenz(skel->progs.%1$s.prog_fd); \n\
+ ", bpf_program__name(prog));
+ }
+
+ bpf_object__for_each_map(map, obj) {
+ const char *ident;
+
+ ident = get_map_ident(map);
+ if (!ident)
+ continue;
+ if (bpf_map__is_internal(map) &&
+ (bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
+ printf("\tmunmap(skel->%1$s, %2$zd);\n",
+ ident, bpf_map_mmap_sz(map));
+ codegen("\
+ \n\
+ skel_closenz(skel->maps.%1$s.map_fd); \n\
+ ", ident);
+ }
+ codegen("\
+ \n\
+ free(skel); \n\
+ } \n\
+ ",
+ obj_name);
+}
+
+static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
+{
+ struct bpf_object_load_attr load_attr = {};
+ DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
+ struct bpf_map *map;
+ int err = 0;
+
+ err = bpf_object__gen_loader(obj, &opts);
+ if (err)
+ return err;
+
+ load_attr.obj = obj;
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ load_attr.log_level = 1 + 2 + 4;
+
+ err = bpf_object__load_xattr(&load_attr);
+ if (err) {
+ p_err("failed to load object file");
+ goto out;
+ }
+ /* If there was no error during load then gen_loader_opts
+ * are populated with the loader program.
+ */
+
+ /* finish generating 'struct skel' */
+ codegen("\
+ \n\
+ }; \n\
+ ", obj_name);
+
+
+ codegen_attach_detach(obj, obj_name);
+
+ codegen_destroy(obj, obj_name);
+
+ codegen("\
+ \n\
+ static inline struct %1$s * \n\
+ %1$s__open(void) \n\
+ { \n\
+ struct %1$s *skel; \n\
+ \n\
+ skel = calloc(sizeof(*skel), 1); \n\
+ if (!skel) \n\
+ goto cleanup; \n\
+ skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
+ ",
+ obj_name, opts.data_sz);
+ bpf_object__for_each_map(map, obj) {
+ const char *ident;
+ const void *mmap_data = NULL;
+ size_t mmap_size = 0;
+
+ ident = get_map_ident(map);
+ if (!ident)
+ continue;
+
+ if (!bpf_map__is_internal(map) ||
+ !(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
+ continue;
+
+ codegen("\
+ \n\
+ skel->%1$s = \n\
+ mmap(NULL, %2$zd, PROT_READ | PROT_WRITE,\n\
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0); \n\
+ if (skel->%1$s == (void *) -1) \n\
+ goto cleanup; \n\
+ memcpy(skel->%1$s, (void *)\"\\ \n\
+ ", ident, bpf_map_mmap_sz(map));
+ mmap_data = bpf_map__initial_value(map, &mmap_size);
+ print_hex(mmap_data, mmap_size);
+ printf("\", %2$zd);\n"
+ "\tskel->maps.%1$s.initial_value = (__u64)(long)skel->%1$s;\n",
+ ident, mmap_size);
+ }
+ codegen("\
+ \n\
+ return skel; \n\
+ cleanup: \n\
+ %1$s__destroy(skel); \n\
+ return NULL; \n\
+ } \n\
+ \n\
+ static inline int \n\
+ %1$s__load(struct %1$s *skel) \n\
+ { \n\
+ struct bpf_load_and_run_opts opts = {}; \n\
+ int err; \n\
+ \n\
+ opts.ctx = (struct bpf_loader_ctx *)skel; \n\
+ opts.data_sz = %2$d; \n\
+ opts.data = (void *)\"\\ \n\
+ ",
+ obj_name, opts.data_sz);
+ print_hex(opts.data, opts.data_sz);
+ codegen("\
+ \n\
+ \"; \n\
+ ");
+
+ codegen("\
+ \n\
+ opts.insns_sz = %d; \n\
+ opts.insns = (void *)\"\\ \n\
+ ",
+ opts.insns_sz);
+ print_hex(opts.insns, opts.insns_sz);
+ codegen("\
+ \n\
+ \"; \n\
+ err = bpf_load_and_run(&opts); \n\
+ if (err < 0) \n\
+ return err; \n\
+ ", obj_name);
+ bpf_object__for_each_map(map, obj) {
+ const char *ident, *mmap_flags;
+
+ ident = get_map_ident(map);
+ if (!ident)
+ continue;
+
+ if (!bpf_map__is_internal(map) ||
+ !(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
+ continue;
+ if (bpf_map__def(map)->map_flags & BPF_F_RDONLY_PROG)
+ mmap_flags = "PROT_READ";
+ else
+ mmap_flags = "PROT_READ | PROT_WRITE";
+
+ printf("\tskel->%1$s =\n"
+ "\t\tmmap(skel->%1$s, %2$zd, %3$s, MAP_SHARED | MAP_FIXED,\n"
+ "\t\t\tskel->maps.%1$s.map_fd, 0);\n",
+ ident, bpf_map_mmap_sz(map), mmap_flags);
+ }
+ codegen("\
+ \n\
+ return 0; \n\
+ } \n\
+ \n\
+ static inline struct %1$s * \n\
+ %1$s__open_and_load(void) \n\
+ { \n\
+ struct %1$s *skel; \n\
+ \n\
+ skel = %1$s__open(); \n\
+ if (!skel) \n\
+ return NULL; \n\
+ if (%1$s__load(skel)) { \n\
+ %1$s__destroy(skel); \n\
+ return NULL; \n\
+ } \n\
+ return skel; \n\
+ } \n\
+ ", obj_name);
+
+ codegen("\
+ \n\
+ \n\
+ #endif /* %s */ \n\
+ ",
+ header_guard);
+ err = 0;
+out:
+ return err;
+}
+
static int do_skeleton(int argc, char **argv)
{
char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
@@ -277,7 +605,7 @@ static int do_skeleton(int argc, char **argv)
struct bpf_object *obj = NULL;
const char *file, *ident;
struct bpf_program *prog;
- int fd, len, err = -1;
+ int fd, err = -1;
struct bpf_map *map;
struct btf *btf;
struct stat st;
@@ -359,7 +687,25 @@ static int do_skeleton(int argc, char **argv)
}
get_header_guard(header_guard, obj_name);
- codegen("\
+ if (use_loader) {
+ codegen("\
+ \n\
+ /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
+ /* THIS FILE IS AUTOGENERATED! */ \n\
+ #ifndef %2$s \n\
+ #define %2$s \n\
+ \n\
+ #include <stdlib.h> \n\
+ #include <bpf/bpf.h> \n\
+ #include <bpf/skel_internal.h> \n\
+ \n\
+ struct %1$s { \n\
+ struct bpf_loader_ctx ctx; \n\
+ ",
+ obj_name, header_guard
+ );
+ } else {
+ codegen("\
\n\
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
\n\
@@ -375,7 +721,8 @@ static int do_skeleton(int argc, char **argv)
struct bpf_object *obj; \n\
",
obj_name, header_guard
- );
+ );
+ }
if (map_cnt) {
printf("\tstruct {\n");
@@ -383,7 +730,10 @@ static int do_skeleton(int argc, char **argv)
ident = get_map_ident(map);
if (!ident)
continue;
- printf("\t\tstruct bpf_map *%s;\n", ident);
+ if (use_loader)
+ printf("\t\tstruct bpf_map_desc %s;\n", ident);
+ else
+ printf("\t\tstruct bpf_map *%s;\n", ident);
}
printf("\t} maps;\n");
}
@@ -391,14 +741,22 @@ static int do_skeleton(int argc, char **argv)
if (prog_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
- printf("\t\tstruct bpf_program *%s;\n",
- bpf_program__name(prog));
+ if (use_loader)
+ printf("\t\tstruct bpf_prog_desc %s;\n",
+ bpf_program__name(prog));
+ else
+ printf("\t\tstruct bpf_program *%s;\n",
+ bpf_program__name(prog));
}
printf("\t} progs;\n");
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
- printf("\t\tstruct bpf_link *%s;\n",
- bpf_program__name(prog));
+ if (use_loader)
+ printf("\t\tint %s_fd;\n",
+ bpf_program__name(prog));
+ else
+ printf("\t\tstruct bpf_link *%s;\n",
+ bpf_program__name(prog));
}
printf("\t} links;\n");
}
@@ -409,6 +767,10 @@ static int do_skeleton(int argc, char **argv)
if (err)
goto out;
}
+ if (use_loader) {
+ err = gen_trace(obj, obj_name, header_guard);
+ goto out;
+ }
codegen("\
\n\
@@ -578,19 +940,7 @@ static int do_skeleton(int argc, char **argv)
file_sz);
/* embed contents of BPF object file */
- for (i = 0, len = 0; i < file_sz; i++) {
- int w = obj_data[i] ? 4 : 2;
-
- len += w;
- if (len > 78) {
- printf("\\\n");
- len = w;
- }
- if (!obj_data[i])
- printf("\\0");
- else
- printf("\\x%02x", (unsigned char)obj_data[i]);
- }
+ print_hex(obj_data, file_sz);
codegen("\
\n\
@@ -636,7 +986,7 @@ static int do_object(int argc, char **argv)
while (argc) {
file = GET_ARG();
- err = bpf_linker__add_file(linker, file);
+ err = bpf_linker__add_file(linker, file, NULL);
if (err) {
p_err("failed to link '%s': %s (%d)", file, strerror(err), err);
goto out;
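
With -L, do_skeleton() emits a self-contained "light skeleton": gen_trace() above embeds opts.data and opts.insns as hex blobs and generates open/load/attach/detach/destroy helpers on top of bpf_load_and_run() instead of the usual libbpf object API. A minimal consumer sketch (the object name "myprog" and the .lskel.h header name are illustrative assumptions):

    #include "myprog.lskel.h"   /* light skeleton generated with the new -L flag */

    int main(void)
    {
        struct myprog *skel;

        skel = myprog__open_and_load();     /* runs the embedded loader program */
        if (!skel)
            return 1;
        if (myprog__attach(skel) < 0) {     /* raw_tp/fentry attach by prog FD */
            myprog__destroy(skel);
            return 1;
        }
        /* mmap-ed views such as skel->bss and skel->rodata are usable here */
        myprog__destroy(skel);              /* detaches and closes all FDs */
        return 0;
    }
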
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index d9afb730136a..7f2817d97079 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -29,6 +29,7 @@ bool show_pinned;
bool block_mount;
bool verifier_logs;
bool relaxed_maps;
+bool use_loader;
struct btf *base_btf;
struct pinned_obj_table prog_table;
struct pinned_obj_table map_table;
@@ -392,6 +393,7 @@ int main(int argc, char **argv)
{ "mapcompat", no_argument, NULL, 'm' },
{ "nomount", no_argument, NULL, 'n' },
{ "debug", no_argument, NULL, 'd' },
+ { "use-loader", no_argument, NULL, 'L' },
{ "base-btf", required_argument, NULL, 'B' },
{ 0 }
};
@@ -409,7 +411,7 @@ int main(int argc, char **argv)
hash_init(link_table.table);
opterr = 0;
- while ((opt = getopt_long(argc, argv, "VhpjfmndB:",
+ while ((opt = getopt_long(argc, argv, "VhpjfLmndB:",
options, NULL)) >= 0) {
switch (opt) {
case 'V':
@@ -452,6 +454,9 @@ int main(int argc, char **argv)
return -1;
}
break;
+ case 'L':
+ use_loader = true;
+ break;
default:
p_err("unrecognized option '%s'", argv[optind - 1]);
if (json_output)
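
-L (--use-loader) is deliberately a global flag: it switches both "prog load" (load through a generated loader program, see prog.c below) and "gen skeleton" (emit a light skeleton, see gen.c above) into loader mode.
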
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 76e91641262b..c1cf29798b99 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -90,6 +90,7 @@ extern bool show_pids;
extern bool block_mount;
extern bool verifier_logs;
extern bool relaxed_maps;
+extern bool use_loader;
extern struct btf *base_btf;
extern struct pinned_obj_table prog_table;
extern struct pinned_obj_table map_table;
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index da4846c9856a..cc48726740ad 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -16,6 +16,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
+#include <dirent.h>
#include <linux/err.h>
#include <linux/perf_event.h>
@@ -24,6 +25,8 @@
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
+#include <bpf/bpf_gen_internal.h>
+#include <bpf/skel_internal.h>
#include "cfg.h"
#include "main.h"
@@ -1499,7 +1502,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
set_max_rlimit();
obj = bpf_object__open_file(file, &open_opts);
- if (IS_ERR_OR_NULL(obj)) {
+ if (libbpf_get_error(obj)) {
p_err("failed to open object file");
goto err_free_reuse_maps;
}
@@ -1645,8 +1648,110 @@ err_free_reuse_maps:
return -1;
}
+static int count_open_fds(void)
+{
+ DIR *dp = opendir("/proc/self/fd");
+ struct dirent *de;
+ int cnt = -3;
+
+ if (!dp)
+ return -1;
+
+ while ((de = readdir(dp)))
+ cnt++;
+
+ closedir(dp);
+ return cnt;
+}
+
+static int try_loader(struct gen_loader_opts *gen)
+{
+ struct bpf_load_and_run_opts opts = {};
+ struct bpf_loader_ctx *ctx;
+ int ctx_sz = sizeof(*ctx) + 64 * max(sizeof(struct bpf_map_desc),
+ sizeof(struct bpf_prog_desc));
+ int log_buf_sz = (1u << 24) - 1;
+ int err, fds_before, fd_delta;
+ char *log_buf;
+
+ ctx = alloca(ctx_sz);
+ memset(ctx, 0, ctx_sz);
+ ctx->sz = ctx_sz;
+ ctx->log_level = 1;
+ ctx->log_size = log_buf_sz;
+ log_buf = malloc(log_buf_sz);
+ if (!log_buf)
+ return -ENOMEM;
+ ctx->log_buf = (long) log_buf;
+ opts.ctx = ctx;
+ opts.data = gen->data;
+ opts.data_sz = gen->data_sz;
+ opts.insns = gen->insns;
+ opts.insns_sz = gen->insns_sz;
+ fds_before = count_open_fds();
+ err = bpf_load_and_run(&opts);
+ fd_delta = count_open_fds() - fds_before;
+ if (err < 0) {
+ fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf);
+ if (fd_delta)
+ fprintf(stderr, "loader prog leaked %d FDs\n",
+ fd_delta);
+ }
+ free(log_buf);
+ return err;
+}
+
+static int do_loader(int argc, char **argv)
+{
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
+ DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
+ struct bpf_object_load_attr load_attr = {};
+ struct bpf_object *obj;
+ const char *file;
+ int err = 0;
+
+ if (!REQ_ARGS(1))
+ return -1;
+ file = GET_ARG();
+
+ obj = bpf_object__open_file(file, &open_opts);
+ if (libbpf_get_error(obj)) {
+ p_err("failed to open object file");
+ goto err_close_obj;
+ }
+
+ err = bpf_object__gen_loader(obj, &gen);
+ if (err)
+ goto err_close_obj;
+
+ load_attr.obj = obj;
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ load_attr.log_level = 1 + 2 + 4;
+
+ err = bpf_object__load_xattr(&load_attr);
+ if (err) {
+ p_err("failed to load object file");
+ goto err_close_obj;
+ }
+
+ if (verifier_logs) {
+ struct dump_data dd = {};
+
+ kernel_syms_load(&dd);
+ dump_xlated_plain(&dd, (void *)gen.insns, gen.insns_sz, false, false);
+ kernel_syms_destroy(&dd);
+ }
+ err = try_loader(&gen);
+err_close_obj:
+ bpf_object__close(obj);
+ return err;
+}
+
static int do_load(int argc, char **argv)
{
+ if (use_loader)
+ return do_loader(argc, argv);
return load_with_options(argc, argv, true);
}
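
In loader mode bpftool never creates maps or loads programs directly: libbpf records the whole load sequence via bpf_object__gen_loader() plus bpf_object__load_xattr(), and try_loader() executes the recorded program with bpf_load_and_run(), comparing /proc/self/fd entry counts before and after to detect FD leaks (count_open_fds() starts at -3 to discount ".", ".." and the FD held by opendir() itself). The scratch context handed to the loader program is laid out as follows (a sketch; 64 is the same bound used in the alloca() above):

    /*
     * ctx -> +----------------------------------------+
     *        | struct bpf_loader_ctx                  | in:  sz, log_level/log_size/log_buf
     *        +----------------------------------------+
     *        | 64 * max(sizeof(struct bpf_map_desc),  | out: map_fd/prog_fd slots that the
     *        |          sizeof(struct bpf_prog_desc)) |      loader program fills via its
     *        +----------------------------------------+      ctx pointer (kept in R6)
     */
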
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
index 6fc3e6f7f40c..f1f32e21d5cd 100644
--- a/tools/bpf/bpftool/xlated_dumper.c
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -196,6 +196,9 @@ static const char *print_imm(void *private_data,
else if (insn->src_reg == BPF_PSEUDO_MAP_VALUE)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"map[id:%u][0]+%u", insn->imm, (insn + 1)->imm);
+ else if (insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "map[idx:%u]+%u", insn->imm, (insn + 1)->imm);
else if (insn->src_reg == BPF_PSEUDO_FUNC)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"subprog[%+d]", insn->imm);
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index ec6d85a81744..418b9b813d65 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -837,6 +837,7 @@ enum bpf_cmd {
BPF_PROG_ATTACH,
BPF_PROG_DETACH,
BPF_PROG_TEST_RUN,
+ BPF_PROG_RUN = BPF_PROG_TEST_RUN,
BPF_PROG_GET_NEXT_ID,
BPF_MAP_GET_NEXT_ID,
BPF_PROG_GET_FD_BY_ID,
@@ -937,6 +938,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_EXT,
BPF_PROG_TYPE_LSM,
BPF_PROG_TYPE_SK_LOOKUP,
+ BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
};
enum bpf_attach_type {
@@ -1097,8 +1099,8 @@ enum bpf_link_type {
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
* the following extensions:
*
- * insn[0].src_reg: BPF_PSEUDO_MAP_FD
- * insn[0].imm: map fd
+ * insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX]
+ * insn[0].imm: map fd or fd_idx
* insn[1].imm: 0
* insn[0].off: 0
* insn[1].off: 0
@@ -1106,15 +1108,19 @@ enum bpf_link_type {
* verifier type: CONST_PTR_TO_MAP
*/
#define BPF_PSEUDO_MAP_FD 1
-/* insn[0].src_reg: BPF_PSEUDO_MAP_VALUE
- * insn[0].imm: map fd
+#define BPF_PSEUDO_MAP_IDX 5
+
+/* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE
+ * insn[0].imm: map fd or fd_idx
* insn[1].imm: offset into value
* insn[0].off: 0
* insn[1].off: 0
* ldimm64 rewrite: address of map[0]+offset
* verifier type: PTR_TO_MAP_VALUE
*/
-#define BPF_PSEUDO_MAP_VALUE 2
+#define BPF_PSEUDO_MAP_VALUE 2
+#define BPF_PSEUDO_MAP_IDX_VALUE 6
+
/* insn[0].src_reg: BPF_PSEUDO_BTF_ID
* insn[0].imm: kernel btd id of VAR
* insn[1].imm: 0
@@ -1314,6 +1320,8 @@ union bpf_attr {
/* or valid module BTF object fd or 0 to attach to vmlinux */
__u32 attach_btf_obj_fd;
};
+ __u32 :32; /* pad */
+ __aligned_u64 fd_array; /* array of FDs */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -4735,6 +4743,24 @@ union bpf_attr {
* be zero-terminated except when **str_size** is 0.
*
* Or **-EBUSY** if the per-CPU memory copy buffer is busy.
+ *
+ * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size)
+ * Description
+ * Execute bpf syscall with given arguments.
+ * Return
+ * A syscall result.
+ *
+ * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags)
+ * Description
+ * Find BTF type with given name and kind in vmlinux BTF or in module's BTFs.
+ * Return
+ * Returns btf_id and btf_obj_fd in lower and upper 32 bits.
+ *
+ * long bpf_sys_close(u32 fd)
+ * Description
+ * Execute close syscall for given FD.
+ * Return
+ * A syscall result.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -4903,6 +4929,9 @@ union bpf_attr {
FN(check_mtu), \
FN(for_each_map_elem), \
FN(snprintf), \
+ FN(sys_bpf), \
+ FN(btf_find_by_name_kind), \
+ FN(sys_close), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
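
Taken together, the new program type and helpers let a BPF program issue bpf() syscalls itself. A minimal sketch of such a program, modeled on the progs/syscall.c selftest this series adds (the struct args layout is the test's own convention; bpf_helpers.h is assumed to carry the new helper definitions):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct args {
        int max_entries;    /* in */
        int map_fd;         /* out */
    };

    SEC("syscall")
    int create_map(struct args *ctx)
    {
        union bpf_attr attr = {
            .map_type = BPF_MAP_TYPE_ARRAY,
            .key_size = 4,
            .value_size = 8,
            .max_entries = ctx->max_entries,
        };
        int fd;

        /* executed via BPF_PROG_RUN (the BPF_PROG_TEST_RUN alias above) */
        fd = bpf_sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
        if (fd < 0)
            return fd;
        ctx->map_fd = fd;
        return 0;
    }

    char _license[] SEC("license") = "GPL";
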
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 9b057cc7650a..430f6874fa41 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1,3 +1,3 @@
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o \
- btf_dump.o ringbuf.o strset.o linker.o
+ btf_dump.o ringbuf.o strset.o linker.o gen_loader.o
diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h
new file mode 100644
index 000000000000..615400391e57
--- /dev/null
+++ b/tools/lib/bpf/bpf_gen_internal.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2021 Facebook */
+#ifndef __BPF_GEN_INTERNAL_H
+#define __BPF_GEN_INTERNAL_H
+
+struct ksym_relo_desc {
+ const char *name;
+ int kind;
+ int insn_idx;
+};
+
+struct bpf_gen {
+ struct gen_loader_opts *opts;
+ void *data_start;
+ void *data_cur;
+ void *insn_start;
+ void *insn_cur;
+ ssize_t cleanup_label;
+ __u32 nr_progs;
+ __u32 nr_maps;
+ int log_level;
+ int error;
+ struct ksym_relo_desc *relos;
+ int relo_cnt;
+ char attach_target[128];
+ int attach_kind;
+};
+
+void bpf_gen__init(struct bpf_gen *gen, int log_level);
+int bpf_gen__finish(struct bpf_gen *gen);
+void bpf_gen__free(struct bpf_gen *gen);
+void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
+void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_attr *map_attr, int map_idx);
+struct bpf_prog_load_params;
+void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_attr, int prog_idx);
+void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
+void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
+void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
+void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind, int insn_idx);
+
+#endif
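
For orientation, this is roughly the order in which libbpf drives these hooks once a gen_loader is attached to an object (a sketch of the flow, not a verbatim libbpf excerpt):

    bpf_gen__init(gen, log_level);                  /* prologue + FD-cleanup tail */
    bpf_gen__load_btf(gen, raw_data, raw_size);     /* one BPF_BTF_LOAD */
    bpf_gen__map_create(gen, &map_attr, map_idx);   /* per map; -1 for inner maps */
    bpf_gen__map_update_elem(gen, map_idx, mmaped, value_sz); /* pre-populate .data et al. */
    bpf_gen__map_freeze(gen, map_idx);              /* .rodata/.kconfig only */
    bpf_gen__record_attach_target(gen, name, type); /* fentry/fexit/... targets */
    bpf_gen__record_extern(gen, name, kind, insn_idx); /* ksym relocations */
    bpf_gen__prog_load(gen, &load_attr, prog_idx);  /* per program */
    err = bpf_gen__finish(gen);                     /* copy FDs to ctx, export insns/data */
    bpf_gen__free(gen);
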
diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
new file mode 100644
index 000000000000..8df718a6b142
--- /dev/null
+++ b/tools/lib/bpf/gen_loader.c
@@ -0,0 +1,729 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2021 Facebook */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <linux/filter.h>
+#include "btf.h"
+#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_internal.h"
+#include "hashmap.h"
+#include "bpf_gen_internal.h"
+#include "skel_internal.h"
+
+#define MAX_USED_MAPS 64
+#define MAX_USED_PROGS 32
+
+/* The following structure describes the stack layout of the loader program.
+ * In addition R6 contains the pointer to context.
+ * R7 contains the result of the last sys_bpf command (typically error or FD).
+ * R9 contains the result of the last sys_close command.
+ *
+ * Naming convention:
+ * ctx - bpf program context
+ * stack - bpf program stack
+ * blob - bpf_attr-s, strings, insns, map data.
+ * All the bytes that loader prog will use for read/write.
+ */
+struct loader_stack {
+ __u32 btf_fd;
+ __u32 map_fd[MAX_USED_MAPS];
+ __u32 prog_fd[MAX_USED_PROGS];
+ __u32 inner_map_fd;
+};
+
+#define stack_off(field) \
+ (__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field))
+
+#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))
+
+static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
+{
+ size_t off = gen->insn_cur - gen->insn_start;
+ void *insn_start;
+
+ if (gen->error)
+ return gen->error;
+ if (size > INT32_MAX || off + size > INT32_MAX) {
+ gen->error = -ERANGE;
+ return -ERANGE;
+ }
+ insn_start = realloc(gen->insn_start, off + size);
+ if (!insn_start) {
+ gen->error = -ENOMEM;
+ free(gen->insn_start);
+ gen->insn_start = NULL;
+ return -ENOMEM;
+ }
+ gen->insn_start = insn_start;
+ gen->insn_cur = insn_start + off;
+ return 0;
+}
+
+static int realloc_data_buf(struct bpf_gen *gen, __u32 size)
+{
+ size_t off = gen->data_cur - gen->data_start;
+ void *data_start;
+
+ if (gen->error)
+ return gen->error;
+ if (size > INT32_MAX || off + size > INT32_MAX) {
+ gen->error = -ERANGE;
+ return -ERANGE;
+ }
+ data_start = realloc(gen->data_start, off + size);
+ if (!data_start) {
+ gen->error = -ENOMEM;
+ free(gen->data_start);
+ gen->data_start = NULL;
+ return -ENOMEM;
+ }
+ gen->data_start = data_start;
+ gen->data_cur = data_start + off;
+ return 0;
+}
+
+static void emit(struct bpf_gen *gen, struct bpf_insn insn)
+{
+ if (realloc_insn_buf(gen, sizeof(insn)))
+ return;
+ memcpy(gen->insn_cur, &insn, sizeof(insn));
+ gen->insn_cur += sizeof(insn);
+}
+
+static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
+{
+ emit(gen, insn1);
+ emit(gen, insn2);
+}
+
+void bpf_gen__init(struct bpf_gen *gen, int log_level)
+{
+ size_t stack_sz = sizeof(struct loader_stack);
+ int i;
+
+ gen->log_level = log_level;
+ /* save ctx pointer into R6 */
+ emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));
+
+ /* bzero stack */
+ emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
+ emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));
+
+ /* jump over cleanup code */
+ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
+ /* size of cleanup code below */
+ (stack_sz / 4) * 3 + 2));
+
+ /* remember the label where all error branches will jump to */
+ gen->cleanup_label = gen->insn_cur - gen->insn_start;
+ /* emit cleanup code: close all temp FDs */
+ for (i = 0; i < stack_sz; i += 4) {
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
+ emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
+ }
+ /* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
+ emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
+ emit(gen, BPF_EXIT_INSN());
+}
+
+static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
+{
+ void *prev;
+
+ if (realloc_data_buf(gen, size))
+ return 0;
+ prev = gen->data_cur;
+ memcpy(gen->data_cur, data, size);
+ gen->data_cur += size;
+ return prev - gen->data_start;
+}
+
+static int insn_bytes_to_bpf_size(__u32 sz)
+{
+ switch (sz) {
+ case 8: return BPF_DW;
+ case 4: return BPF_W;
+ case 2: return BPF_H;
+ case 1: return BPF_B;
+ default: return -1;
+ }
+}
+
+/* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */
+static void emit_rel_store(struct bpf_gen *gen, int off, int data)
+{
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, data));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, off));
+ emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
+}
+
+/* *(u64 *)(blob + off) = (u64)(void *)(%sp + stack_off) */
+static void emit_rel_store_sp(struct bpf_gen *gen, int off, int stack_off)
+{
+ emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_10));
+ emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, stack_off));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, off));
+ emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
+}
+
+static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
+ bool check_non_zero)
+{
+ emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off));
+ if (check_non_zero)
+ /* If value in ctx is zero don't update the blob.
+ * For example: when ctx->map.max_entries == 0, keep default max_entries from bpf.c
+ */
+ emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, off));
+ emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
+}
+
+static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off)
+{
+ emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, off));
+ emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
+}
+
+static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off)
+{
+ emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
+ emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
+}
+
+static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size)
+{
+ emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, attr));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf));
+ /* remember the result in R7 */
+ emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
+}
+
+static bool is_simm16(__s64 value)
+{
+ return value == (__s64)(__s16)value;
+}
+
+static void emit_check_err(struct bpf_gen *gen)
+{
+ __s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;
+
+ /* R7 contains result of last sys_bpf command.
+ * if (R7 < 0) goto cleanup;
+ */
+ if (is_simm16(off)) {
+ emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off));
+ } else {
+ gen->error = -ERANGE;
+ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
+ }
+}
+
+/* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */
+static void emit_debug(struct bpf_gen *gen, int reg1, int reg2,
+ const char *fmt, va_list args)
+{
+ char buf[1024];
+ int addr, len, ret;
+
+ if (!gen->log_level)
+ return;
+ ret = vsnprintf(buf, sizeof(buf), fmt, args);
+ if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0)
+ /* The special case to accommodate common debug_ret():
+ * to avoid specifying BPF_REG_7 and adding " r=%%d" to
+ * prints explicitly.
+ */
+ strcat(buf, " r=%d");
+ len = strlen(buf) + 1;
+ addr = add_data(gen, buf, len);
+
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, addr));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
+ if (reg1 >= 0)
+ emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1));
+ if (reg2 >= 0)
+ emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk));
+}
+
+static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ emit_debug(gen, reg1, reg2, fmt, args);
+ va_end(args);
+}
+
+static void debug_ret(struct bpf_gen *gen, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ emit_debug(gen, BPF_REG_7, -1, fmt, args);
+ va_end(args);
+}
+
+static void __emit_sys_close(struct bpf_gen *gen)
+{
+ emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0,
+ /* 2 is the number of the following insns
+ * 6 is additional insns in debug_regs
+ */
+ 2 + (gen->log_level ? 6 : 0)));
+ emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
+ debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d");
+}
+
+static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off)
+{
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off));
+ __emit_sys_close(gen);
+}
+
+static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
+{
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, blob_off));
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0));
+ __emit_sys_close(gen);
+}
+
+int bpf_gen__finish(struct bpf_gen *gen)
+{
+ int i;
+
+ emit_sys_close_stack(gen, stack_off(btf_fd));
+ for (i = 0; i < gen->nr_progs; i++)
+ move_stack2ctx(gen,
+ sizeof(struct bpf_loader_ctx) +
+ sizeof(struct bpf_map_desc) * gen->nr_maps +
+ sizeof(struct bpf_prog_desc) * i +
+ offsetof(struct bpf_prog_desc, prog_fd), 4,
+ stack_off(prog_fd[i]));
+ for (i = 0; i < gen->nr_maps; i++)
+ move_stack2ctx(gen,
+ sizeof(struct bpf_loader_ctx) +
+ sizeof(struct bpf_map_desc) * i +
+ offsetof(struct bpf_map_desc, map_fd), 4,
+ stack_off(map_fd[i]));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
+ emit(gen, BPF_EXIT_INSN());
+ pr_debug("gen: finish %d\n", gen->error);
+ if (!gen->error) {
+ struct gen_loader_opts *opts = gen->opts;
+
+ opts->insns = gen->insn_start;
+ opts->insns_sz = gen->insn_cur - gen->insn_start;
+ opts->data = gen->data_start;
+ opts->data_sz = gen->data_cur - gen->data_start;
+ }
+ return gen->error;
+}
+
+void bpf_gen__free(struct bpf_gen *gen)
+{
+ if (!gen)
+ return;
+ free(gen->data_start);
+ free(gen->insn_start);
+ free(gen);
+}
+
+void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
+ __u32 btf_raw_size)
+{
+ int attr_size = offsetofend(union bpf_attr, btf_log_level);
+ int btf_data, btf_load_attr;
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_size);
+ pr_debug("gen: load_btf: size %d\n", btf_raw_size);
+ btf_data = add_data(gen, btf_raw_data, btf_raw_size);
+
+ attr.btf_size = btf_raw_size;
+ btf_load_attr = add_data(gen, &attr, attr_size);
+
+ /* populate union bpf_attr with user provided log details */
+ move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
+ offsetof(struct bpf_loader_ctx, log_level), false);
+ move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4,
+ offsetof(struct bpf_loader_ctx, log_size), false);
+ move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8,
+ offsetof(struct bpf_loader_ctx, log_buf), false);
+ /* populate union bpf_attr with a pointer to the BTF data */
+ emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data);
+ /* emit BTF_LOAD command */
+ emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size);
+ debug_ret(gen, "btf_load size %d", btf_raw_size);
+ emit_check_err(gen);
+ /* remember btf_fd in the stack, if successful */
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd)));
+}
+
+void bpf_gen__map_create(struct bpf_gen *gen,
+ struct bpf_create_map_attr *map_attr, int map_idx)
+{
+ int attr_size = offsetofend(union bpf_attr, btf_vmlinux_value_type_id);
+ bool close_inner_map_fd = false;
+ int map_create_attr;
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_size);
+ attr.map_type = map_attr->map_type;
+ attr.key_size = map_attr->key_size;
+ attr.value_size = map_attr->value_size;
+ attr.map_flags = map_attr->map_flags;
+ memcpy(attr.map_name, map_attr->name,
+ min((unsigned)strlen(map_attr->name), BPF_OBJ_NAME_LEN - 1));
+ attr.numa_node = map_attr->numa_node;
+ attr.map_ifindex = map_attr->map_ifindex;
+ attr.max_entries = map_attr->max_entries;
+ switch (attr.map_type) {
+ case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+ case BPF_MAP_TYPE_CGROUP_ARRAY:
+ case BPF_MAP_TYPE_STACK_TRACE:
+ case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+ case BPF_MAP_TYPE_HASH_OF_MAPS:
+ case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_DEVMAP_HASH:
+ case BPF_MAP_TYPE_CPUMAP:
+ case BPF_MAP_TYPE_XSKMAP:
+ case BPF_MAP_TYPE_SOCKMAP:
+ case BPF_MAP_TYPE_SOCKHASH:
+ case BPF_MAP_TYPE_QUEUE:
+ case BPF_MAP_TYPE_STACK:
+ case BPF_MAP_TYPE_RINGBUF:
+ break;
+ default:
+ attr.btf_key_type_id = map_attr->btf_key_type_id;
+ attr.btf_value_type_id = map_attr->btf_value_type_id;
+ }
+
+ pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
+ attr.map_name, map_idx, map_attr->map_type, attr.btf_value_type_id);
+
+ map_create_attr = add_data(gen, &attr, attr_size);
+ if (attr.btf_value_type_id)
+ /* populate union bpf_attr with btf_fd saved in the stack earlier */
+ move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
+ stack_off(btf_fd));
+ switch (attr.map_type) {
+ case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+ case BPF_MAP_TYPE_HASH_OF_MAPS:
+ move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
+ stack_off(inner_map_fd));
+ close_inner_map_fd = true;
+ break;
+ default:
+ break;
+ }
+ /* conditionally update max_entries */
+ if (map_idx >= 0)
+ move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4,
+ sizeof(struct bpf_loader_ctx) +
+ sizeof(struct bpf_map_desc) * map_idx +
+ offsetof(struct bpf_map_desc, max_entries),
+ true /* check that max_entries != 0 */);
+ /* emit MAP_CREATE command */
+ emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
+ debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
+ attr.map_name, map_idx, map_attr->map_type, attr.value_size,
+ attr.btf_value_type_id);
+ emit_check_err(gen);
+ /* remember map_fd in the stack, if successful */
+ if (map_idx < 0) {
+ /* This bpf_gen__map_create() function is called with map_idx >= 0
+ * for all maps that libbpf loading logic tracks.
+ * It's called with -1 to create an inner map.
+ */
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
+ stack_off(inner_map_fd)));
+ } else if (map_idx != gen->nr_maps) {
+ gen->error = -EDOM; /* internal bug */
+ return;
+ } else {
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
+ stack_off(map_fd[map_idx])));
+ gen->nr_maps++;
+ }
+ if (close_inner_map_fd)
+ emit_sys_close_stack(gen, stack_off(inner_map_fd));
+}
+
+void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
+ enum bpf_attach_type type)
+{
+ const char *prefix;
+ int kind, ret;
+
+ btf_get_kernel_prefix_kind(type, &prefix, &kind);
+ gen->attach_kind = kind;
+ ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
+ prefix, attach_name);
+ if (ret == sizeof(gen->attach_target))
+ gen->error = -ENOSPC;
+}
+
+static void emit_find_attach_target(struct bpf_gen *gen)
+{
+ int name, len = strlen(gen->attach_target) + 1;
+
+ pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind);
+ name = add_data(gen, gen->attach_target, len);
+
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, name));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
+ emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
+ debug_ret(gen, "find_by_name_kind(%s,%d)",
+ gen->attach_target, gen->attach_kind);
+ emit_check_err(gen);
+ /* if successful, btf_id is in lower 32-bit of R7 and
+ * btf_obj_fd is in upper 32-bit
+ */
+}
+
+void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind,
+ int insn_idx)
+{
+ struct ksym_relo_desc *relo;
+
+ relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo));
+ if (!relo) {
+ gen->error = -ENOMEM;
+ return;
+ }
+ gen->relos = relo;
+ relo += gen->relo_cnt;
+ relo->name = name;
+ relo->kind = kind;
+ relo->insn_idx = insn_idx;
+ gen->relo_cnt++;
+}
+
+static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
+{
+ int name, insn, len = strlen(relo->name) + 1;
+
+ pr_debug("gen: emit_relo: %s at %d\n", relo->name, relo->insn_idx);
+ name = add_data(gen, relo->name, len);
+
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, name));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
+ emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
+ debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
+ emit_check_err(gen);
+ /* store btf_id into insn[insn_idx].imm */
+ insn = insns + sizeof(struct bpf_insn) * relo->insn_idx +
+ offsetof(struct bpf_insn, imm);
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, insn));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, 0));
+ if (relo->kind == BTF_KIND_VAR) {
+ /* store btf_obj_fd into insn[insn_idx + 1].imm */
+ emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
+ sizeof(struct bpf_insn)));
+ }
+}
+
+static void emit_relos(struct bpf_gen *gen, int insns)
+{
+ int i;
+
+ for (i = 0; i < gen->relo_cnt; i++)
+ emit_relo(gen, gen->relos + i, insns);
+}
+
+static void cleanup_relos(struct bpf_gen *gen, int insns)
+{
+ int i, insn;
+
+ for (i = 0; i < gen->relo_cnt; i++) {
+ if (gen->relos[i].kind != BTF_KIND_VAR)
+ continue;
+ /* close fd recorded in insn[insn_idx + 1].imm */
+ insn = insns +
+ sizeof(struct bpf_insn) * (gen->relos[i].insn_idx + 1) +
+ offsetof(struct bpf_insn, imm);
+ emit_sys_close_blob(gen, insn);
+ }
+ if (gen->relo_cnt) {
+ free(gen->relos);
+ gen->relo_cnt = 0;
+ gen->relos = NULL;
+ }
+}
+
+void bpf_gen__prog_load(struct bpf_gen *gen,
+ struct bpf_prog_load_params *load_attr, int prog_idx)
+{
+ int attr_size = offsetofend(union bpf_attr, fd_array);
+ int prog_load_attr, license, insns, func_info, line_info;
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_size);
+ pr_debug("gen: prog_load: type %d insns_cnt %zd\n",
+ load_attr->prog_type, load_attr->insn_cnt);
+ /* add license string to blob of bytes */
+ license = add_data(gen, load_attr->license, strlen(load_attr->license) + 1);
+ /* add insns to blob of bytes */
+ insns = add_data(gen, load_attr->insns,
+ load_attr->insn_cnt * sizeof(struct bpf_insn));
+
+ attr.prog_type = load_attr->prog_type;
+ attr.expected_attach_type = load_attr->expected_attach_type;
+ attr.attach_btf_id = load_attr->attach_btf_id;
+ attr.prog_ifindex = load_attr->prog_ifindex;
+ attr.kern_version = 0;
+ attr.insn_cnt = (__u32)load_attr->insn_cnt;
+ attr.prog_flags = load_attr->prog_flags;
+
+ attr.func_info_rec_size = load_attr->func_info_rec_size;
+ attr.func_info_cnt = load_attr->func_info_cnt;
+ func_info = add_data(gen, load_attr->func_info,
+ attr.func_info_cnt * attr.func_info_rec_size);
+
+ attr.line_info_rec_size = load_attr->line_info_rec_size;
+ attr.line_info_cnt = load_attr->line_info_cnt;
+ line_info = add_data(gen, load_attr->line_info,
+ attr.line_info_cnt * attr.line_info_rec_size);
+
+ memcpy(attr.prog_name, load_attr->name,
+ min((unsigned)strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
+ prog_load_attr = add_data(gen, &attr, attr_size);
+
+ /* populate union bpf_attr with a pointer to license */
+ emit_rel_store(gen, attr_field(prog_load_attr, license), license);
+
+ /* populate union bpf_attr with a pointer to instructions */
+ emit_rel_store(gen, attr_field(prog_load_attr, insns), insns);
+
+ /* populate union bpf_attr with a pointer to func_info */
+ emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);
+
+ /* populate union bpf_attr with a pointer to line_info */
+ emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);
+
+ /* populate union bpf_attr fd_array with a pointer to stack where map_fds are saved */
+ emit_rel_store_sp(gen, attr_field(prog_load_attr, fd_array),
+ stack_off(map_fd[0]));
+
+ /* populate union bpf_attr with user provided log details */
+ move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
+ offsetof(struct bpf_loader_ctx, log_level), false);
+ move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4,
+ offsetof(struct bpf_loader_ctx, log_size), false);
+ move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8,
+ offsetof(struct bpf_loader_ctx, log_buf), false);
+ /* populate union bpf_attr with btf_fd saved in the stack earlier */
+ move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4,
+ stack_off(btf_fd));
+ if (gen->attach_kind) {
+ emit_find_attach_target(gen);
+ /* populate union bpf_attr with btf_id and btf_obj_fd found by helper */
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, prog_load_attr));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
+ offsetof(union bpf_attr, attach_btf_id)));
+ emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
+ offsetof(union bpf_attr, attach_btf_obj_fd)));
+ }
+ emit_relos(gen, insns);
+ /* emit PROG_LOAD command */
+ emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
+ debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
+ /* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
+ cleanup_relos(gen, insns);
+ if (gen->attach_kind)
+ emit_sys_close_blob(gen,
+ attr_field(prog_load_attr, attach_btf_obj_fd));
+ emit_check_err(gen);
+ /* remember prog_fd in the stack, if successful */
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
+ stack_off(prog_fd[gen->nr_progs])));
+ gen->nr_progs++;
+}
+
+void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
+ __u32 value_size)
+{
+ int attr_size = offsetofend(union bpf_attr, flags);
+ int map_update_attr, value, key;
+ union bpf_attr attr;
+ int zero = 0;
+
+ memset(&attr, 0, attr_size);
+ pr_debug("gen: map_update_elem: idx %d\n", map_idx);
+
+ value = add_data(gen, pvalue, value_size);
+ key = add_data(gen, &zero, sizeof(zero));
+
+ /* if (map_desc[map_idx].initial_value)
+ * copy_from_user(value, initial_value, value_size);
+ */
+ emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
+ sizeof(struct bpf_loader_ctx) +
+ sizeof(struct bpf_map_desc) * map_idx +
+ offsetof(struct bpf_map_desc, initial_value)));
+ emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 4));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, value));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
+
+ map_update_attr = add_data(gen, &attr, attr_size);
+ move_stack2blob(gen, attr_field(map_update_attr, map_fd), 4,
+ stack_off(map_fd[map_idx]));
+ emit_rel_store(gen, attr_field(map_update_attr, key), key);
+ emit_rel_store(gen, attr_field(map_update_attr, value), value);
+ /* emit MAP_UPDATE_ELEM command */
+ emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
+ debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size);
+ emit_check_err(gen);
+}
+
+void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
+{
+ int attr_size = offsetofend(union bpf_attr, map_fd);
+ int map_freeze_attr;
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_size);
+ pr_debug("gen: map_freeze: idx %d\n", map_idx);
+ map_freeze_attr = add_data(gen, &attr, attr_size);
+ move_stack2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
+ stack_off(map_fd[map_idx]));
+ /* emit MAP_FREEZE command */
+ emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
+ debug_ret(gen, "map_freeze");
+ emit_check_err(gen);
+}
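
Every recorded command funnels through emit_sys_bpf(); for reference, a call like emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size) expands to the sequence below, where the blob offset becomes a BPF_PSEUDO_MAP_IDX_VALUE ld_imm64 that the kernel rewrites into a pointer into the loader map at load time:

    BPF_MOV64_IMM(BPF_REG_1, BPF_BTF_LOAD),
    /* r2 = address of the loader blob (map value) + btf_load_attr offset */
    BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, btf_load_attr),
    BPF_MOV64_IMM(BPF_REG_3, attr_size),
    BPF_EMIT_CALL(BPF_FUNC_sys_bpf),
    BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),   /* R7 caches the result for emit_check_err() */
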
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index c41d9b2b59ac..69cd1a835ebd 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -54,6 +54,7 @@
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"
+#include "bpf_gen_internal.h"
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
@@ -178,7 +179,7 @@ enum kern_feature_id {
__FEAT_CNT,
};
-static bool kernel_supports(enum kern_feature_id feat_id);
+static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
enum reloc_type {
RELO_LD64,
@@ -432,6 +433,8 @@ struct bpf_object {
bool loaded;
bool has_subcalls;
+ struct bpf_gen *gen_loader;
+
/*
* Information when doing elf related work. Only valid if fd
* is valid.
@@ -677,6 +680,11 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
return -LIBBPF_ERRNO__FORMAT;
}
+ if (sec_idx != obj->efile.text_shndx && GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
+ pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
+ return -ENOTSUP;
+ }
+
pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
@@ -700,13 +708,14 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
if (err)
return err;
- /* if function is a global/weak symbol, but has hidden
- * visibility (STV_HIDDEN), mark its BTF FUNC as static to
- * enable more permissive BPF verification mode with more
- * outside context available to BPF verifier
+ /* if function is a global/weak symbol, but has restricted
+ * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
+ * as static to enable more permissive BPF verification mode
+ * with more outside context available to BPF verifier
*/
if (GELF_ST_BIND(sym.st_info) != STB_LOCAL
- && GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN)
+ && (GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN
+ || GELF_ST_VISIBILITY(sym.st_other) == STV_INTERNAL))
prog->mark_btf_static = true;
nr_progs++;
@@ -1794,7 +1803,6 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
if (!symbols)
return -EINVAL;
-
scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
data = elf_sec_data(obj, scn);
if (!scn || !data) {
@@ -1854,6 +1862,12 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
return -LIBBPF_ERRNO__FORMAT;
}
+ if (GELF_ST_TYPE(sym.st_info) == STT_SECTION
+ || GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
+ pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
+ return -ENOTSUP;
+ }
+
map->libbpf_type = LIBBPF_MAP_UNSPEC;
map->sec_idx = sym.st_shndx;
map->sec_offset = sym.st_value;
@@ -2261,6 +2275,16 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
pr_debug("map '%s': found inner map definition.\n", map->name);
}
+static const char *btf_var_linkage_str(__u32 linkage)
+{
+ switch (linkage) {
+ case BTF_VAR_STATIC: return "static";
+ case BTF_VAR_GLOBAL_ALLOCATED: return "global";
+ case BTF_VAR_GLOBAL_EXTERN: return "extern";
+ default: return "unknown";
+ }
+}
+
static int bpf_object__init_user_btf_map(struct bpf_object *obj,
const struct btf_type *sec,
int var_idx, int sec_idx,
@@ -2293,10 +2317,9 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
map_name, btf_kind_str(var));
return -EINVAL;
}
- if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
- var_extra->linkage != BTF_VAR_STATIC) {
- pr_warn("map '%s': unsupported var linkage %u.\n",
- map_name, var_extra->linkage);
+ if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
+ pr_warn("map '%s': unsupported map linkage %s.\n",
+ map_name, btf_var_linkage_str(var_extra->linkage));
return -EOPNOTSUPP;
}
@@ -2443,20 +2466,20 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
static bool btf_needs_sanitization(struct bpf_object *obj)
{
- bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
- bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
- bool has_float = kernel_supports(FEAT_BTF_FLOAT);
- bool has_func = kernel_supports(FEAT_BTF_FUNC);
+ bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
+ bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
+ bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
+ bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
return !has_func || !has_datasec || !has_func_global || !has_float;
}
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
{
- bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
- bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
- bool has_float = kernel_supports(FEAT_BTF_FLOAT);
- bool has_func = kernel_supports(FEAT_BTF_FUNC);
+ bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
+ bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
+ bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
+ bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
struct btf_type *t;
int i, j, vlen;
@@ -2637,7 +2660,7 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
int err;
/* btf_vmlinux could be loaded earlier */
- if (obj->btf_vmlinux)
+ if (obj->btf_vmlinux || obj->gen_loader)
return 0;
if (!force && !obj_needs_vmlinux_btf(obj))
@@ -2662,7 +2685,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
if (!obj->btf)
return 0;
- if (!kernel_supports(FEAT_BTF)) {
+ if (!kernel_supports(obj, FEAT_BTF)) {
if (kernel_needs_btf(obj)) {
err = -EOPNOTSUPP;
goto report;
@@ -2719,7 +2742,20 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
bpf_object__sanitize_btf(obj, kern_btf);
}
- err = btf__load(kern_btf);
+ if (obj->gen_loader) {
+ __u32 raw_size = 0;
+ const void *raw_data = btf__get_raw_data(kern_btf, &raw_size);
+
+ if (!raw_data)
+ return -ENOMEM;
+ bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
+ /* Pretend to have valid FD to pass various fd >= 0 checks.
+ * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
+ */
+ btf__set_fd(kern_btf, 0);
+ } else {
+ err = btf__load(kern_btf);
+ }
if (sanitize) {
if (!err) {
/* move fd to libbpf's BTF */
@@ -4293,11 +4329,17 @@ static struct kern_feature_desc {
},
};
-static bool kernel_supports(enum kern_feature_id feat_id)
+static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
{
struct kern_feature_desc *feat = &feature_probes[feat_id];
int ret;
+ if (obj->gen_loader)
+ /* To generate loader program assume the latest kernel
+ * to avoid doing extra prog_load, map_create syscalls.
+ */
+ return true;
+
if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
ret = feat->probe();
if (ret > 0) {
@@ -4380,6 +4422,13 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
char *cp, errmsg[STRERR_BUFSIZE];
int err, zero = 0;
+ if (obj->gen_loader) {
+ bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
+ map->mmaped, map->def.value_size);
+ if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
+ bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
+ return 0;
+ }
err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
if (err) {
err = -errno;
@@ -4405,14 +4454,14 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
static void bpf_map__destroy(struct bpf_map *map);
-static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
+static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
{
struct bpf_create_map_attr create_attr;
struct bpf_map_def *def = &map->def;
memset(&create_attr, 0, sizeof(create_attr));
- if (kernel_supports(FEAT_PROG_NAME))
+ if (kernel_supports(obj, FEAT_PROG_NAME))
create_attr.name = map->name;
create_attr.map_ifindex = map->map_ifindex;
create_attr.map_type = def->type;
@@ -4453,7 +4502,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
if (map->inner_map) {
int err;
- err = bpf_object__create_map(obj, map->inner_map);
+ err = bpf_object__create_map(obj, map->inner_map, true);
if (err) {
pr_warn("map '%s': failed to create inner map: %d\n",
map->name, err);
@@ -4465,7 +4514,15 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
create_attr.inner_map_fd = map->inner_map_fd;
}
- map->fd = bpf_create_map_xattr(&create_attr);
+ if (obj->gen_loader) {
+ bpf_gen__map_create(obj->gen_loader, &create_attr, is_inner ? -1 : map - obj->maps);
+ /* Pretend to have a valid FD to pass various fd >= 0 checks.
+ * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
+ */
+ map->fd = 0;
+ } else {
+ map->fd = bpf_create_map_xattr(&create_attr);
+ }
if (map->fd < 0 && (create_attr.btf_key_type_id ||
create_attr.btf_value_type_id)) {
char *cp, errmsg[STRERR_BUFSIZE];
@@ -4486,6 +4543,8 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
return -errno;
if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
+ if (obj->gen_loader)
+ map->inner_map->fd = -1;
bpf_map__destroy(map->inner_map);
zfree(&map->inner_map);
}
@@ -4493,11 +4552,11 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
return 0;
}
-static int init_map_slots(struct bpf_map *map)
+static int init_map_slots(struct bpf_object *obj, struct bpf_map *map)
{
const struct bpf_map *targ_map;
unsigned int i;
- int fd, err;
+ int fd, err = 0;
for (i = 0; i < map->init_slots_sz; i++) {
if (!map->init_slots[i])
@@ -4505,7 +4564,13 @@ static int init_map_slots(struct bpf_map *map)
targ_map = map->init_slots[i];
fd = bpf_map__fd(targ_map);
- err = bpf_map_update_elem(map->fd, &i, &fd, 0);
+ if (obj->gen_loader) {
+ pr_warn("// TODO map_update_elem: idx %ld key %d value==map_idx %ld\n",
+ map - obj->maps, i, targ_map - obj->maps);
+ return -ENOTSUP;
+ } else {
+ err = bpf_map_update_elem(map->fd, &i, &fd, 0);
+ }
if (err) {
err = -errno;
pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
@@ -4547,7 +4612,7 @@ bpf_object__create_maps(struct bpf_object *obj)
pr_debug("map '%s': skipping creation (preset fd=%d)\n",
map->name, map->fd);
} else {
- err = bpf_object__create_map(obj, map);
+ err = bpf_object__create_map(obj, map, false);
if (err)
goto err_out;
@@ -4563,7 +4628,7 @@ bpf_object__create_maps(struct bpf_object *obj)
}
if (map->init_slots_sz) {
- err = init_map_slots(map);
+ err = init_map_slots(obj, map);
if (err < 0) {
zclose(map->fd);
goto err_out;
@@ -4973,11 +5038,14 @@ static int load_module_btfs(struct bpf_object *obj)
if (obj->btf_modules_loaded)
return 0;
+ if (obj->gen_loader)
+ return 0;
+
/* don't do this again, even if we find no module BTFs */
obj->btf_modules_loaded = true;
/* kernel too old to support module BTFs */
- if (!kernel_supports(FEAT_MODULE_BTF))
+ if (!kernel_supports(obj, FEAT_MODULE_BTF))
return 0;
while (true) {
@@ -6120,6 +6188,12 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
if (str_is_empty(spec_str))
return -EINVAL;
+ if (prog->obj->gen_loader) {
+ pr_warn("// TODO core_relo: prog %ld insn[%d] %s %s kind %d\n",
+ prog - prog->obj->programs, relo->insn_off / 8,
+ local_name, spec_str, relo->kind);
+ return -ENOTSUP;
+ }
err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
if (err) {
pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
@@ -6371,19 +6445,34 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
switch (relo->type) {
case RELO_LD64:
- insn[0].src_reg = BPF_PSEUDO_MAP_FD;
- insn[0].imm = obj->maps[relo->map_idx].fd;
+ if (obj->gen_loader) {
+ insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
+ insn[0].imm = relo->map_idx;
+ } else {
+ insn[0].src_reg = BPF_PSEUDO_MAP_FD;
+ insn[0].imm = obj->maps[relo->map_idx].fd;
+ }
break;
case RELO_DATA:
- insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
insn[1].imm = insn[0].imm + relo->sym_off;
- insn[0].imm = obj->maps[relo->map_idx].fd;
+ if (obj->gen_loader) {
+ insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
+ insn[0].imm = relo->map_idx;
+ } else {
+ insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
+ insn[0].imm = obj->maps[relo->map_idx].fd;
+ }
break;
case RELO_EXTERN_VAR:
ext = &obj->externs[relo->sym_off];
if (ext->type == EXT_KCFG) {
- insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
- insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
+ if (obj->gen_loader) {
+ insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
+ insn[0].imm = obj->kconfig_map_idx;
+ } else {
+ insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
+ insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
+ }
insn[1].imm = ext->kcfg.data_off;
} else /* EXT_KSYM */ {
if (ext->ksym.type_id) { /* typed ksyms */
@@ -6402,11 +6491,15 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
insn[0].imm = ext->ksym.kernel_btf_id;
break;
case RELO_SUBPROG_ADDR:
- insn[0].src_reg = BPF_PSEUDO_FUNC;
- /* will be handled as a follow up pass */
+ if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
+ pr_warn("prog '%s': relo #%d: bad insn\n",
+ prog->name, i);
+ return -EINVAL;
+ }
+ /* handled already */
break;
case RELO_CALL:
- /* will be handled as a follow up pass */
+ /* handled already */
break;
default:
pr_warn("prog '%s': relo #%d: bad relo type %d\n",
@@ -6497,7 +6590,7 @@ reloc_prog_func_and_line_info(const struct bpf_object *obj,
/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
* support func/line info
*/
- if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC))
+ if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
return 0;
/* only attempt func info relocation if main program's func_info
@@ -6575,6 +6668,30 @@ static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, si
sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
}
+static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
+{
+ int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
+ struct reloc_desc *relos;
+ int i;
+
+ if (main_prog == subprog)
+ return 0;
+ relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
+ if (!relos)
+ return -ENOMEM;
+ memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
+ sizeof(*relos) * subprog->nr_reloc);
+
+ for (i = main_prog->nr_reloc; i < new_cnt; i++)
+ relos[i].insn_idx += subprog->sub_insn_off;
+ /* After insn_idx adjustment the 'relos' array is still sorted
+ * by insn_idx and doesn't break bsearch.
+ */
+ main_prog->reloc_desc = relos;
+ main_prog->nr_reloc = new_cnt;
+ return 0;
+}
+
static int
bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
struct bpf_program *prog)
@@ -6595,6 +6712,11 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
continue;
relo = find_prog_insn_relo(prog, insn_idx);
+ if (relo && relo->type == RELO_EXTERN_FUNC)
+ /* kfunc relocations will be handled later
+ * in bpf_object__relocate_data()
+ */
+ continue;
if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
prog->name, insn_idx, relo->type);
@@ -6669,6 +6791,10 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
main_prog->name, subprog->insns_cnt, subprog->name);
+ /* The subprog insns are now appended. Append its relos too. */
+ err = append_subprog_relos(main_prog, subprog);
+ if (err)
+ return err;
err = bpf_object__reloc_code(obj, main_prog, subprog);
if (err)
return err;
@@ -6798,11 +6924,25 @@ bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
return 0;
}
+static void
+bpf_object__free_relocs(struct bpf_object *obj)
+{
+ struct bpf_program *prog;
+ int i;
+
+ /* free up relocation descriptors */
+ for (i = 0; i < obj->nr_programs; i++) {
+ prog = &obj->programs[i];
+ zfree(&prog->reloc_desc);
+ prog->nr_reloc = 0;
+ }
+}
+
static int
bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
{
struct bpf_program *prog;
- size_t i;
+ size_t i, j;
int err;
if (obj->btf_ext) {
@@ -6813,23 +6953,32 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
return err;
}
}
- /* relocate data references first for all programs and sub-programs,
- * as they don't change relative to code locations, so subsequent
- * subprogram processing won't need to re-calculate any of them
+
+ /* Before relocating calls, pre-process relocations and mark the few
+ * ld_imm64 instructions that point to subprogs.
+ * Otherwise bpf_object__reloc_code() would later have to consider
+ * all ld_imm64 insns as relocation candidates. That would slow
+ * relocation down, since the number of find_prog_insn_relo() calls
+ * would grow and most of them would fail to find a relo.
*/
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
- err = bpf_object__relocate_data(obj, prog);
- if (err) {
- pr_warn("prog '%s': failed to relocate data references: %d\n",
- prog->name, err);
- return err;
+ for (j = 0; j < prog->nr_reloc; j++) {
+ struct reloc_desc *relo = &prog->reloc_desc[j];
+ struct bpf_insn *insn = &prog->insns[relo->insn_idx];
+
+ /* mark the insn, so it's recognized by insn_is_pseudo_func() */
+ if (relo->type == RELO_SUBPROG_ADDR)
+ insn[0].src_reg = BPF_PSEUDO_FUNC;
}
}
- /* now relocate subprogram calls and append used subprograms to main
+
+ /* relocate subprogram calls and append used subprograms to main
* programs; each copy of subprogram code needs to be relocated
* differently for each main program, because its code location might
- * have changed
+ * have changed.
+ * Append subprog relos to main programs to allow data relos to be
+ * processed after text is completely relocated.
*/
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
@@ -6846,12 +6995,20 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
return err;
}
}
- /* free up relocation descriptors */
+ /* Process data relos for main programs */
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
- zfree(&prog->reloc_desc);
- prog->nr_reloc = 0;
+ if (prog_is_subprog(obj, prog))
+ continue;
+ err = bpf_object__relocate_data(obj, prog);
+ if (err) {
+ pr_warn("prog '%s': failed to relocate data references: %d\n",
+ prog->name, err);
+ return err;
+ }
}
+ if (!obj->gen_loader)
+ bpf_object__free_relocs(obj);
return 0;
}
@@ -7040,6 +7197,9 @@ static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program
enum bpf_func_id func_id;
int i;
+ if (obj->gen_loader)
+ return 0;
+
for (i = 0; i < prog->insns_cnt; i++, insn++) {
if (!insn_is_helper_call(insn, &func_id))
continue;
@@ -7051,12 +7211,12 @@ static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program
switch (func_id) {
case BPF_FUNC_probe_read_kernel:
case BPF_FUNC_probe_read_user:
- if (!kernel_supports(FEAT_PROBE_READ_KERN))
+ if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
insn->imm = BPF_FUNC_probe_read;
break;
case BPF_FUNC_probe_read_kernel_str:
case BPF_FUNC_probe_read_user_str:
- if (!kernel_supports(FEAT_PROBE_READ_KERN))
+ if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
insn->imm = BPF_FUNC_probe_read_str;
break;
default:
@@ -7091,12 +7251,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.prog_type = prog->type;
/* old kernels might not support specifying expected_attach_type */
- if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
+ if (!kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
prog->sec_def->is_exp_attach_type_optional)
load_attr.expected_attach_type = 0;
else
load_attr.expected_attach_type = prog->expected_attach_type;
- if (kernel_supports(FEAT_PROG_NAME))
+ if (kernel_supports(prog->obj, FEAT_PROG_NAME))
load_attr.name = prog->name;
load_attr.insns = insns;
load_attr.insn_cnt = insns_cnt;
@@ -7112,7 +7272,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
/* specify func_info/line_info only if kernel supports them */
btf_fd = bpf_object__btf_fd(prog->obj);
- if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
+ if (btf_fd >= 0 && kernel_supports(prog->obj, FEAT_BTF_FUNC)) {
load_attr.prog_btf_fd = btf_fd;
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
@@ -7124,6 +7284,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.log_level = prog->log_level;
load_attr.prog_flags = prog->prog_flags;
+ if (prog->obj->gen_loader) {
+ bpf_gen__prog_load(prog->obj->gen_loader, &load_attr,
+ prog - prog->obj->programs);
+ *pfd = -1;
+ return 0;
+ }
retry_load:
if (log_buf_size) {
log_buf = malloc(log_buf_size);
@@ -7142,7 +7308,7 @@ retry_load:
pr_debug("verifier log:\n%s", log_buf);
if (prog->obj->rodata_map_idx >= 0 &&
- kernel_supports(FEAT_PROG_BIND_MAP)) {
+ kernel_supports(prog->obj, FEAT_PROG_BIND_MAP)) {
struct bpf_map *rodata_map =
&prog->obj->maps[prog->obj->rodata_map_idx];
@@ -7201,6 +7367,38 @@ out:
return ret;
}
+static int bpf_program__record_externs(struct bpf_program *prog)
+{
+ struct bpf_object *obj = prog->obj;
+ int i;
+
+ for (i = 0; i < prog->nr_reloc; i++) {
+ struct reloc_desc *relo = &prog->reloc_desc[i];
+ struct extern_desc *ext = &obj->externs[relo->sym_off];
+
+ switch (relo->type) {
+ case RELO_EXTERN_VAR:
+ if (ext->type != EXT_KSYM)
+ continue;
+ if (!ext->ksym.type_id) {
+ pr_warn("typeless ksym %s is not supported yet\n",
+ ext->name);
+ return -ENOTSUP;
+ }
+ bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_VAR,
+ relo->insn_idx);
+ break;
+ case RELO_EXTERN_FUNC:
+ bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_FUNC,
+ relo->insn_idx);
+ break;
+ default:
+ continue;
+ }
+ }
+ return 0;
+}
+
static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
@@ -7246,6 +7444,8 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
prog->name, prog->instances.nr);
}
+ if (prog->obj->gen_loader)
+ bpf_program__record_externs(prog);
err = load_program(prog, prog->insns, prog->insns_cnt,
license, kern_ver, &fd);
if (!err)
@@ -7322,6 +7522,8 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
if (err)
return err;
}
+ if (obj->gen_loader)
+ bpf_object__free_relocs(obj);
return 0;
}
@@ -7500,11 +7702,11 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj)
bpf_object__for_each_map(m, obj) {
if (!bpf_map__is_internal(m))
continue;
- if (!kernel_supports(FEAT_GLOBAL_DATA)) {
+ if (!kernel_supports(obj, FEAT_GLOBAL_DATA)) {
pr_warn("kernel doesn't support global data\n");
return -ENOTSUP;
}
- if (!kernel_supports(FEAT_ARRAY_MMAP))
+ if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
m->def.map_flags ^= BPF_F_MMAPABLE;
}
@@ -7702,6 +7904,12 @@ static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
if (ext->type != EXT_KSYM || !ext->ksym.type_id)
continue;
+ if (obj->gen_loader) {
+ ext->is_set = true;
+ ext->ksym.kernel_btf_obj_fd = 0;
+ ext->ksym.kernel_btf_id = 0;
+ continue;
+ }
t = btf__type_by_id(obj->btf, ext->btf_id);
if (btf_is_var(t))
err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
@@ -7816,6 +8024,9 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
return -EINVAL;
}
+ if (obj->gen_loader)
+ bpf_gen__init(obj->gen_loader, attr->log_level);
+
err = bpf_object__probe_loading(obj);
err = err ? : bpf_object__load_vmlinux_btf(obj, false);
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
@@ -7826,6 +8037,15 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
err = err ? : bpf_object__load_progs(obj, attr->log_level);
+ if (obj->gen_loader) {
+ /* reset FDs */
+ btf__set_fd(obj->btf, -1);
+ for (i = 0; i < obj->nr_maps; i++)
+ obj->maps[i].fd = -1;
+ if (!err)
+ err = bpf_gen__finish(obj->gen_loader);
+ }
+
/* clean up module BTFs */
for (i = 0; i < obj->btf_module_cnt; i++) {
close(obj->btf_modules[i].fd);
@@ -8451,6 +8671,7 @@ void bpf_object__close(struct bpf_object *obj)
if (obj->clear_priv)
obj->clear_priv(obj, obj->priv);
+ bpf_gen__free(obj->gen_loader);
bpf_object__elf_finish(obj);
bpf_object__unload(obj);
btf__free(obj->btf);
@@ -8541,6 +8762,22 @@ void *bpf_object__priv(const struct bpf_object *obj)
return obj ? obj->priv : ERR_PTR(-EINVAL);
}
+int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
+{
+ struct bpf_gen *gen;
+
+ if (!opts)
+ return -EFAULT;
+ if (!OPTS_VALID(opts, gen_loader_opts))
+ return -EINVAL;
+ gen = calloc(sizeof(*gen), 1);
+ if (!gen)
+ return -ENOMEM;
+ gen->opts = opts;
+ obj->gen_loader = gen;
+ return 0;
+}
+
static struct bpf_program *
__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
bool forward)
@@ -8887,6 +9124,8 @@ static const struct bpf_sec_def section_defs[] = {
.expected_attach_type = BPF_TRACE_ITER,
.is_attach_btf = true,
.attach_fn = attach_iter),
+ SEC_DEF("syscall", SYSCALL,
+ .is_sleepable = true),
BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP,
BPF_XDP_DEVMAP),
BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP,
@@ -9176,6 +9415,28 @@ invalid_prog:
#define BTF_ITER_PREFIX "bpf_iter_"
#define BTF_MAX_NAME_SIZE 128
+void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
+ const char **prefix, int *kind)
+{
+ switch (attach_type) {
+ case BPF_TRACE_RAW_TP:
+ *prefix = BTF_TRACE_PREFIX;
+ *kind = BTF_KIND_TYPEDEF;
+ break;
+ case BPF_LSM_MAC:
+ *prefix = BTF_LSM_PREFIX;
+ *kind = BTF_KIND_FUNC;
+ break;
+ case BPF_TRACE_ITER:
+ *prefix = BTF_ITER_PREFIX;
+ *kind = BTF_KIND_FUNC;
+ break;
+ default:
+ *prefix = "";
+ *kind = BTF_KIND_FUNC;
+ }
+}
+
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
const char *name, __u32 kind)
{
@@ -9196,21 +9457,11 @@ static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
static inline int find_attach_btf_id(struct btf *btf, const char *name,
enum bpf_attach_type attach_type)
{
- int err;
+ const char *prefix;
+ int kind;
- if (attach_type == BPF_TRACE_RAW_TP)
- err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
- BTF_KIND_TYPEDEF);
- else if (attach_type == BPF_LSM_MAC)
- err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
- BTF_KIND_FUNC);
- else if (attach_type == BPF_TRACE_ITER)
- err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name,
- BTF_KIND_FUNC);
- else
- err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
-
- return err;
+ btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
+ return find_btf_by_prefix_kind(btf, prefix, name, kind);
}
int libbpf_find_vmlinux_btf_id(const char *name,
@@ -9309,7 +9560,7 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd,
__u32 attach_prog_fd = prog->attach_prog_fd;
const char *name = prog->sec_name, *attach_name;
const struct bpf_sec_def *sec = NULL;
- int i, err;
+ int i, err = 0;
if (!name)
return -EINVAL;
@@ -9344,7 +9595,13 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd,
}
/* kernel/module BTF ID */
- err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
+ if (prog->obj->gen_loader) {
+ bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
+ *btf_obj_fd = 0;
+ *btf_type_id = 1;
+ } else {
+ err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
+ }
if (err) {
pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
return err;
@@ -9501,6 +9758,14 @@ int bpf_map__set_initial_value(struct bpf_map *map,
return 0;
}
+const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
+{
+ if (!map->mmaped)
+ return NULL;
+ *psize = map->def.value_size;
+ return map->mmaped;
+}
+
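This getter mirrors bpf_map__set_initial_value(). An illustrative use (not part of the patch; a generated skeleton named skel with an internal .rodata map is assumed):

	size_t sz;
	const void *val = bpf_map__initial_value(skel->maps.rodata, &sz);

	if (val)	/* NULL if the map has no mmaped storage */
		printf("rodata holds %zu bytes\n", sz);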
bool bpf_map__is_offload_neutral(const struct bpf_map *map)
{
return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index bec4e6a6e31d..d98523558f39 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -471,6 +471,7 @@ LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
LIBBPF_API void *bpf_map__priv(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
const void *data, size_t size);
+LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
@@ -498,6 +499,7 @@ LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd);
+/* XDP related API */
struct xdp_link_info {
__u32 prog_id;
__u32 drv_prog_id;
@@ -520,6 +522,49 @@ LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
size_t info_size, __u32 flags);
+/* TC related API */
+enum bpf_tc_attach_point {
+ BPF_TC_INGRESS = 1 << 0,
+ BPF_TC_EGRESS = 1 << 1,
+ BPF_TC_CUSTOM = 1 << 2,
+};
+
+#define BPF_TC_PARENT(a, b) \
+ ((((a) << 16) & 0xFFFF0000U) | ((b) & 0x0000FFFFU))
+
+enum bpf_tc_flags {
+ BPF_TC_F_REPLACE = 1 << 0,
+};
+
+struct bpf_tc_hook {
+ size_t sz;
+ int ifindex;
+ enum bpf_tc_attach_point attach_point;
+ __u32 parent;
+ size_t :0;
+};
+#define bpf_tc_hook__last_field parent
+
+struct bpf_tc_opts {
+ size_t sz;
+ int prog_fd;
+ __u32 flags;
+ __u32 prog_id;
+ __u32 handle;
+ __u32 priority;
+ size_t :0;
+};
+#define bpf_tc_opts__last_field priority
+
+LIBBPF_API int bpf_tc_hook_create(struct bpf_tc_hook *hook);
+LIBBPF_API int bpf_tc_hook_destroy(struct bpf_tc_hook *hook);
+LIBBPF_API int bpf_tc_attach(const struct bpf_tc_hook *hook,
+ struct bpf_tc_opts *opts);
+LIBBPF_API int bpf_tc_detach(const struct bpf_tc_hook *hook,
+ const struct bpf_tc_opts *opts);
+LIBBPF_API int bpf_tc_query(const struct bpf_tc_hook *hook,
+ struct bpf_tc_opts *opts);
+
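For illustration (not part of the patch), a minimal sketch of the new TC API; prog_fd is assumed to come from an already-loaded object and error handling is trimmed:

	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = 1,
			    .attach_point = BPF_TC_INGRESS);
	DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1,
			    .prog_fd = prog_fd);

	bpf_tc_hook_create(&hook);	/* sets up the clsact qdisc */
	bpf_tc_attach(&hook, &opts);	/* fills opts.prog_id on success */

	/* detach and query expect prog_fd/prog_id zeroed; the filter is
	 * identified by handle and priority alone
	 */
	opts.prog_fd = 0;
	opts.prog_id = 0;
	bpf_tc_detach(&hook, &opts);
	bpf_tc_hook_destroy(&hook);	/* flushes filters or deletes qdisc */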
/* Ring buffer APIs */
struct ring_buffer;
@@ -756,6 +801,18 @@ LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s);
+struct gen_loader_opts {
+ size_t sz; /* size of this struct, for forward/backward compatibility */
+ const char *data;
+ const char *insns;
+ __u32 data_sz;
+ __u32 insns_sz;
+};
+
+#define gen_loader_opts__last_field insns_sz
+LIBBPF_API int bpf_object__gen_loader(struct bpf_object *obj,
+ struct gen_loader_opts *opts);
+
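For illustration (not part of the patch), the intended call flow; "prog.bpf.o" is a placeholder and bpftool's "gen skeleton -L" is the real consumer of the produced blobs:

	DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
	struct bpf_object *obj = bpf_object__open("prog.bpf.o");

	bpf_object__gen_loader(obj, &gen);	/* must be set before load */
	bpf_object__load(obj);			/* records syscalls, no real FDs */
	/* gen.data/gen.data_sz now hold the map and BTF blob,
	 * gen.insns/gen.insns_sz the loader program instructions
	 */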
enum libbpf_tristate {
TRI_NO = 0,
TRI_YES = 1,
@@ -768,10 +825,18 @@ struct bpf_linker_opts {
};
#define bpf_linker_opts__last_field sz
+struct bpf_linker_file_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+};
+#define bpf_linker_file_opts__last_field sz
+
struct bpf_linker;
LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts);
-LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker, const char *filename);
+LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker,
+ const char *filename,
+ const struct bpf_linker_file_opts *opts);
LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);
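Since the new opts argument is optional, existing callers simply pass NULL (illustrative sketch; file names are placeholders):

	struct bpf_linker *linker = bpf_linker__new("out.o", NULL);

	bpf_linker__add_file(linker, "a.o", NULL);	/* NULL opts == defaults */
	bpf_linker__add_file(linker, "b.o", NULL);
	bpf_linker__finalize(linker);
	bpf_linker__free(linker);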
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index b9b29baf1df8..2abef6f17c06 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -359,6 +359,13 @@ LIBBPF_0.4.0 {
bpf_linker__finalize;
bpf_linker__free;
bpf_linker__new;
+ bpf_map__initial_value;
bpf_map__inner_map;
+ bpf_object__gen_loader;
bpf_object__set_kversion;
+ bpf_tc_attach;
+ bpf_tc_detach;
+ bpf_tc_hook_create;
+ bpf_tc_hook_destroy;
+ bpf_tc_query;
} LIBBPF_0.3.0;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index acbcf6c7bdf8..a2cc297edb99 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -263,6 +263,8 @@ int bpf_object__section_size(const struct bpf_object *obj, const char *name,
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
__u32 *off);
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
+void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
+ const char **prefix, int *kind);
struct btf_ext_info {
/*
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 9de084b1c699..b594a88620ce 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -158,7 +158,9 @@ struct bpf_linker {
static int init_output_elf(struct bpf_linker *linker, const char *file);
-static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, struct src_obj *obj);
+static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
+ const struct bpf_linker_file_opts *opts,
+ struct src_obj *obj);
static int linker_sanity_check_elf(struct src_obj *obj);
static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec);
static int linker_sanity_check_elf_relos(struct src_obj *obj, struct src_sec *sec);
@@ -435,15 +437,19 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
return 0;
}
-int bpf_linker__add_file(struct bpf_linker *linker, const char *filename)
+int bpf_linker__add_file(struct bpf_linker *linker, const char *filename,
+ const struct bpf_linker_file_opts *opts)
{
struct src_obj obj = {};
int err = 0;
+ if (!OPTS_VALID(opts, bpf_linker_file_opts))
+ return -EINVAL;
+
if (!linker->elf)
return -EINVAL;
- err = err ?: linker_load_obj_file(linker, filename, &obj);
+ err = err ?: linker_load_obj_file(linker, filename, opts, &obj);
err = err ?: linker_append_sec_data(linker, &obj);
err = err ?: linker_append_elf_syms(linker, &obj);
err = err ?: linker_append_elf_relos(linker, &obj);
@@ -529,7 +535,9 @@ static struct src_sec *add_src_sec(struct src_obj *obj, const char *sec_name)
return sec;
}
-static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, struct src_obj *obj)
+static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
+ const struct bpf_linker_file_opts *opts,
+ struct src_obj *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
const int host_endianness = ELFDATA2LSB;
@@ -1780,7 +1788,7 @@ static void sym_update_visibility(Elf64_Sym *sym, int sym_vis)
/* libelf doesn't provide setters for ST_VISIBILITY,
* but it is stored in the lower 2 bits of st_other
*/
- sym->st_other &= 0x03;
+ sym->st_other &= ~0x03;
sym->st_other |= sym_vis;
}
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index d2cb28e9ef52..47444588e0d2 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -4,7 +4,10 @@
#include <stdlib.h>
#include <memory.h>
#include <unistd.h>
+#include <arpa/inet.h>
#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/pkt_cls.h>
#include <linux/rtnetlink.h>
#include <sys/socket.h>
#include <errno.h>
@@ -73,9 +76,20 @@ cleanup:
return ret;
}
-static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq,
- __dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn,
- void *cookie)
+static void libbpf_netlink_close(int sock)
+{
+ close(sock);
+}
+
+enum {
+ NL_CONT,
+ NL_NEXT,
+ NL_DONE,
+};
+
+static int libbpf_netlink_recv(int sock, __u32 nl_pid, int seq,
+ __dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn,
+ void *cookie)
{
bool multipart = true;
struct nlmsgerr *err;
@@ -84,6 +98,7 @@ static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq,
int len, ret;
while (multipart) {
+start:
multipart = false;
len = recv(sock, buf, sizeof(buf), 0);
if (len < 0) {
@@ -121,8 +136,16 @@ static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq,
}
if (_fn) {
ret = _fn(nh, fn, cookie);
- if (ret)
+ switch (ret) {
+ case NL_CONT:
+ break;
+ case NL_NEXT:
+ goto start;
+ case NL_DONE:
+ return 0;
+ default:
return ret;
+ }
}
}
}
@@ -131,72 +154,72 @@ done:
return ret;
}
+static int libbpf_netlink_send_recv(struct nlmsghdr *nh,
+ __dump_nlmsg_t parse_msg,
+ libbpf_dump_nlmsg_t parse_attr,
+ void *cookie)
+{
+ __u32 nl_pid = 0;
+ int sock, ret;
+
+ sock = libbpf_netlink_open(&nl_pid);
+ if (sock < 0)
+ return sock;
+
+ nh->nlmsg_pid = 0;
+ nh->nlmsg_seq = time(NULL);
+
+ if (send(sock, nh, nh->nlmsg_len, 0) < 0) {
+ ret = -errno;
+ goto out;
+ }
+
+ ret = libbpf_netlink_recv(sock, nl_pid, nh->nlmsg_seq,
+ parse_msg, parse_attr, cookie);
+out:
+ libbpf_netlink_close(sock);
+ return ret;
+}
+
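The NL_CONT/NL_NEXT/NL_DONE values let a parse callback steer the receive loop: NL_CONT keeps scanning the current buffer, NL_NEXT restarts with a fresh recv(), NL_DONE stops with success. A hypothetical callback (not part of the patch) might look like:

	static int example_parse(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
				 void *cookie)
	{
		if (nh->nlmsg_type != RTM_NEWTFILTER)
			return NL_CONT;	/* uninteresting, keep scanning */
		if (nh->nlmsg_flags & NLM_F_ECHO)
			return NL_NEXT;	/* unicast echo, poll for the ack */
		return NL_DONE;		/* found what we came for */
	}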
static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
__u32 flags)
{
- int sock, seq = 0, ret;
- struct nlattr *nla, *nla_xdp;
+ struct nlattr *nla;
+ int ret;
struct {
struct nlmsghdr nh;
struct ifinfomsg ifinfo;
char attrbuf[64];
} req;
- __u32 nl_pid = 0;
-
- sock = libbpf_netlink_open(&nl_pid);
- if (sock < 0)
- return sock;
memset(&req, 0, sizeof(req));
- req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
- req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
- req.nh.nlmsg_type = RTM_SETLINK;
- req.nh.nlmsg_pid = 0;
- req.nh.nlmsg_seq = ++seq;
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_type = RTM_SETLINK;
req.ifinfo.ifi_family = AF_UNSPEC;
- req.ifinfo.ifi_index = ifindex;
-
- /* started nested attribute for XDP */
- nla = (struct nlattr *)(((char *)&req)
- + NLMSG_ALIGN(req.nh.nlmsg_len));
- nla->nla_type = NLA_F_NESTED | IFLA_XDP;
- nla->nla_len = NLA_HDRLEN;
-
- /* add XDP fd */
- nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
- nla_xdp->nla_type = IFLA_XDP_FD;
- nla_xdp->nla_len = NLA_HDRLEN + sizeof(int);
- memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd));
- nla->nla_len += nla_xdp->nla_len;
-
- /* if user passed in any flags, add those too */
+ req.ifinfo.ifi_index = ifindex;
+
+ nla = nlattr_begin_nested(&req.nh, sizeof(req), IFLA_XDP);
+ if (!nla)
+ return -EMSGSIZE;
+ ret = nlattr_add(&req.nh, sizeof(req), IFLA_XDP_FD, &fd, sizeof(fd));
+ if (ret < 0)
+ return ret;
if (flags) {
- nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
- nla_xdp->nla_type = IFLA_XDP_FLAGS;
- nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
- memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
- nla->nla_len += nla_xdp->nla_len;
+ ret = nlattr_add(&req.nh, sizeof(req), IFLA_XDP_FLAGS, &flags,
+ sizeof(flags));
+ if (ret < 0)
+ return ret;
}
-
if (flags & XDP_FLAGS_REPLACE) {
- nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
- nla_xdp->nla_type = IFLA_XDP_EXPECTED_FD;
- nla_xdp->nla_len = NLA_HDRLEN + sizeof(old_fd);
- memcpy((char *)nla_xdp + NLA_HDRLEN, &old_fd, sizeof(old_fd));
- nla->nla_len += nla_xdp->nla_len;
+ ret = nlattr_add(&req.nh, sizeof(req), IFLA_XDP_EXPECTED_FD,
+ &old_fd, sizeof(old_fd));
+ if (ret < 0)
+ return ret;
}
+ nlattr_end_nested(&req.nh, nla);
- req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);
-
- if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
- ret = -errno;
- goto cleanup;
- }
- ret = bpf_netlink_recv(sock, nl_pid, seq, NULL, NULL, NULL);
-
-cleanup:
- close(sock);
- return ret;
+ return libbpf_netlink_send_recv(&req.nh, NULL, NULL, NULL);
}
int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
@@ -212,9 +235,7 @@ int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
flags |= XDP_FLAGS_REPLACE;
}
- return __bpf_set_link_xdp_fd_replace(ifindex, fd,
- old_fd,
- flags);
+ return __bpf_set_link_xdp_fd_replace(ifindex, fd, old_fd, flags);
}
int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
@@ -231,6 +252,7 @@ static int __dump_link_nlmsg(struct nlmsghdr *nlh,
len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi));
attr = (struct nlattr *) ((void *) ifi + NLMSG_ALIGN(sizeof(*ifi)));
+
if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0)
return -LIBBPF_ERRNO__NLPARSE;
@@ -282,16 +304,21 @@ static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb)
return 0;
}
-static int libbpf_nl_get_link(int sock, unsigned int nl_pid,
- libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie);
-
int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
size_t info_size, __u32 flags)
{
struct xdp_id_md xdp_id = {};
- int sock, ret;
- __u32 nl_pid = 0;
__u32 mask;
+ int ret;
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg ifm;
+ } req = {
+ .nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nh.nlmsg_type = RTM_GETLINK,
+ .nh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .ifm.ifi_family = AF_PACKET,
+ };
if (flags & ~XDP_FLAGS_MASK || !info_size)
return -EINVAL;
@@ -302,14 +329,11 @@ int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
if (flags && flags & mask)
return -EINVAL;
- sock = libbpf_netlink_open(&nl_pid);
- if (sock < 0)
- return sock;
-
xdp_id.ifindex = ifindex;
xdp_id.flags = flags;
- ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_info, &xdp_id);
+ ret = libbpf_netlink_send_recv(&req.nh, __dump_link_nlmsg,
+ get_xdp_info, &xdp_id);
if (!ret) {
size_t sz = min(info_size, sizeof(xdp_id.info));
@@ -317,7 +341,6 @@ int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
memset((void *) info + sz, 0, info_size - sz);
}
- close(sock);
return ret;
}
@@ -349,24 +372,403 @@ int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
return ret;
}
-int libbpf_nl_get_link(int sock, unsigned int nl_pid,
- libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
+typedef int (*qdisc_config_t)(struct nlmsghdr *nh, struct tcmsg *t,
+ size_t maxsz);
+
+static int clsact_config(struct nlmsghdr *nh, struct tcmsg *t, size_t maxsz)
{
+ t->tcm_parent = TC_H_CLSACT;
+ t->tcm_handle = TC_H_MAKE(TC_H_CLSACT, 0);
+
+ return nlattr_add(nh, maxsz, TCA_KIND, "clsact", sizeof("clsact"));
+}
+
+static int attach_point_to_config(struct bpf_tc_hook *hook,
+ qdisc_config_t *config)
+{
+ switch (OPTS_GET(hook, attach_point, 0)) {
+ case BPF_TC_INGRESS:
+ case BPF_TC_EGRESS:
+ case BPF_TC_INGRESS | BPF_TC_EGRESS:
+ if (OPTS_GET(hook, parent, 0))
+ return -EINVAL;
+ *config = &clsact_config;
+ return 0;
+ case BPF_TC_CUSTOM:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tc_get_tcm_parent(enum bpf_tc_attach_point attach_point,
+ __u32 *parent)
+{
+ switch (attach_point) {
+ case BPF_TC_INGRESS:
+ case BPF_TC_EGRESS:
+ if (*parent)
+ return -EINVAL;
+ *parent = TC_H_MAKE(TC_H_CLSACT,
+ attach_point == BPF_TC_INGRESS ?
+ TC_H_MIN_INGRESS : TC_H_MIN_EGRESS);
+ break;
+ case BPF_TC_CUSTOM:
+ if (!*parent)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int tc_qdisc_modify(struct bpf_tc_hook *hook, int cmd, int flags)
+{
+ qdisc_config_t config;
+ int ret;
struct {
- struct nlmsghdr nlh;
- struct ifinfomsg ifm;
- } req = {
- .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
- .nlh.nlmsg_type = RTM_GETLINK,
- .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
- .ifm.ifi_family = AF_PACKET,
- };
- int seq = time(NULL);
+ struct nlmsghdr nh;
+ struct tcmsg tc;
+ char buf[256];
+ } req;
+
+ ret = attach_point_to_config(hook, &config);
+ if (ret < 0)
+ return ret;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | flags;
+ req.nh.nlmsg_type = cmd;
+ req.tc.tcm_family = AF_UNSPEC;
+ req.tc.tcm_ifindex = OPTS_GET(hook, ifindex, 0);
+
+ ret = config(&req.nh, &req.tc, sizeof(req));
+ if (ret < 0)
+ return ret;
+
+ return libbpf_netlink_send_recv(&req.nh, NULL, NULL, NULL);
+}
+
+static int tc_qdisc_create_excl(struct bpf_tc_hook *hook)
+{
+ return tc_qdisc_modify(hook, RTM_NEWQDISC, NLM_F_CREATE);
+}
+
+static int tc_qdisc_delete(struct bpf_tc_hook *hook)
+{
+ return tc_qdisc_modify(hook, RTM_DELQDISC, 0);
+}
+
+int bpf_tc_hook_create(struct bpf_tc_hook *hook)
+{
+ if (!hook || !OPTS_VALID(hook, bpf_tc_hook) ||
+ OPTS_GET(hook, ifindex, 0) <= 0)
+ return -EINVAL;
+
+ return tc_qdisc_create_excl(hook);
+}
- req.nlh.nlmsg_seq = seq;
- if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+static int __bpf_tc_detach(const struct bpf_tc_hook *hook,
+ const struct bpf_tc_opts *opts,
+ const bool flush);
+
+int bpf_tc_hook_destroy(struct bpf_tc_hook *hook)
+{
+ if (!hook || !OPTS_VALID(hook, bpf_tc_hook) ||
+ OPTS_GET(hook, ifindex, 0) <= 0)
+ return -EINVAL;
+
+ switch (OPTS_GET(hook, attach_point, 0)) {
+ case BPF_TC_INGRESS:
+ case BPF_TC_EGRESS:
+ return __bpf_tc_detach(hook, NULL, true);
+ case BPF_TC_INGRESS | BPF_TC_EGRESS:
+ return tc_qdisc_delete(hook);
+ case BPF_TC_CUSTOM:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+struct bpf_cb_ctx {
+ struct bpf_tc_opts *opts;
+ bool processed;
+};
+
+static int __get_tc_info(void *cookie, struct tcmsg *tc, struct nlattr **tb,
+ bool unicast)
+{
+ struct nlattr *tbb[TCA_BPF_MAX + 1];
+ struct bpf_cb_ctx *info = cookie;
+
+ if (!info || !info->opts)
+ return -EINVAL;
+ if (unicast && info->processed)
+ return -EINVAL;
+ if (!tb[TCA_OPTIONS])
+ return NL_CONT;
+
+ libbpf_nla_parse_nested(tbb, TCA_BPF_MAX, tb[TCA_OPTIONS], NULL);
+ if (!tbb[TCA_BPF_ID])
+ return -EINVAL;
+
+ OPTS_SET(info->opts, prog_id, libbpf_nla_getattr_u32(tbb[TCA_BPF_ID]));
+ OPTS_SET(info->opts, handle, tc->tcm_handle);
+ OPTS_SET(info->opts, priority, TC_H_MAJ(tc->tcm_info) >> 16);
+
+ info->processed = true;
+ return unicast ? NL_NEXT : NL_DONE;
+}
+
+static int get_tc_info(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
+ void *cookie)
+{
+ struct tcmsg *tc = NLMSG_DATA(nh);
+ struct nlattr *tb[TCA_MAX + 1];
+
+ libbpf_nla_parse(tb, TCA_MAX,
+ (struct nlattr *)((char *)tc + NLMSG_ALIGN(sizeof(*tc))),
+ NLMSG_PAYLOAD(nh, sizeof(*tc)), NULL);
+ if (!tb[TCA_KIND])
+ return NL_CONT;
+ return __get_tc_info(cookie, tc, tb, nh->nlmsg_flags & NLM_F_ECHO);
+}
+
+static int tc_add_fd_and_name(struct nlmsghdr *nh, size_t maxsz, int fd)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ char name[256];
+ int len, ret;
+
+ ret = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+ if (ret < 0)
+ return ret;
+
+ ret = nlattr_add(nh, maxsz, TCA_BPF_FD, &fd, sizeof(fd));
+ if (ret < 0)
+ return ret;
+ len = snprintf(name, sizeof(name), "%s:[%u]", info.name, info.id);
+ if (len < 0)
return -errno;
+ if (len >= sizeof(name))
+ return -ENAMETOOLONG;
+ return nlattr_add(nh, maxsz, TCA_BPF_NAME, name, len + 1);
+}
+
+int bpf_tc_attach(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
+{
+ __u32 protocol, bpf_flags, handle, priority, parent, prog_id, flags;
+ int ret, ifindex, attach_point, prog_fd;
+ struct bpf_cb_ctx info = {};
+ struct nlattr *nla;
+ struct {
+ struct nlmsghdr nh;
+ struct tcmsg tc;
+ char buf[256];
+ } req;
+
+ if (!hook || !opts ||
+ !OPTS_VALID(hook, bpf_tc_hook) ||
+ !OPTS_VALID(opts, bpf_tc_opts))
+ return -EINVAL;
+
+ ifindex = OPTS_GET(hook, ifindex, 0);
+ parent = OPTS_GET(hook, parent, 0);
+ attach_point = OPTS_GET(hook, attach_point, 0);
+
+ handle = OPTS_GET(opts, handle, 0);
+ priority = OPTS_GET(opts, priority, 0);
+ prog_fd = OPTS_GET(opts, prog_fd, 0);
+ prog_id = OPTS_GET(opts, prog_id, 0);
+ flags = OPTS_GET(opts, flags, 0);
+
+ if (ifindex <= 0 || !prog_fd || prog_id)
+ return -EINVAL;
+ if (priority > UINT16_MAX)
+ return -EINVAL;
+ if (flags & ~BPF_TC_F_REPLACE)
+ return -EINVAL;
+
+ flags = (flags & BPF_TC_F_REPLACE) ? NLM_F_REPLACE : NLM_F_EXCL;
+ protocol = ETH_P_ALL;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE |
+ NLM_F_ECHO | flags;
+ req.nh.nlmsg_type = RTM_NEWTFILTER;
+ req.tc.tcm_family = AF_UNSPEC;
+ req.tc.tcm_ifindex = ifindex;
+ req.tc.tcm_handle = handle;
+ req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol));
+
+ ret = tc_get_tcm_parent(attach_point, &parent);
+ if (ret < 0)
+ return ret;
+ req.tc.tcm_parent = parent;
+
+ ret = nlattr_add(&req.nh, sizeof(req), TCA_KIND, "bpf", sizeof("bpf"));
+ if (ret < 0)
+ return ret;
+ nla = nlattr_begin_nested(&req.nh, sizeof(req), TCA_OPTIONS);
+ if (!nla)
+ return -EMSGSIZE;
+ ret = tc_add_fd_and_name(&req.nh, sizeof(req), prog_fd);
+ if (ret < 0)
+ return ret;
+ bpf_flags = TCA_BPF_FLAG_ACT_DIRECT;
+ ret = nlattr_add(&req.nh, sizeof(req), TCA_BPF_FLAGS, &bpf_flags,
+ sizeof(bpf_flags));
+ if (ret < 0)
+ return ret;
+ nlattr_end_nested(&req.nh, nla);
+
+ info.opts = opts;
+
+ ret = libbpf_netlink_send_recv(&req.nh, get_tc_info, NULL, &info);
+ if (ret < 0)
+ return ret;
+ if (!info.processed)
+ return -ENOENT;
+ return ret;
+}
+
+static int __bpf_tc_detach(const struct bpf_tc_hook *hook,
+ const struct bpf_tc_opts *opts,
+ const bool flush)
+{
+ __u32 protocol = 0, handle, priority, parent, prog_id, flags;
+ int ret, ifindex, attach_point, prog_fd;
+ struct {
+ struct nlmsghdr nh;
+ struct tcmsg tc;
+ char buf[256];
+ } req;
- return bpf_netlink_recv(sock, nl_pid, seq, __dump_link_nlmsg,
- dump_link_nlmsg, cookie);
+ if (!hook ||
+ !OPTS_VALID(hook, bpf_tc_hook) ||
+ !OPTS_VALID(opts, bpf_tc_opts))
+ return -EINVAL;
+
+ ifindex = OPTS_GET(hook, ifindex, 0);
+ parent = OPTS_GET(hook, parent, 0);
+ attach_point = OPTS_GET(hook, attach_point, 0);
+
+ handle = OPTS_GET(opts, handle, 0);
+ priority = OPTS_GET(opts, priority, 0);
+ prog_fd = OPTS_GET(opts, prog_fd, 0);
+ prog_id = OPTS_GET(opts, prog_id, 0);
+ flags = OPTS_GET(opts, flags, 0);
+
+ if (ifindex <= 0 || flags || prog_fd || prog_id)
+ return -EINVAL;
+ if (priority > UINT16_MAX)
+ return -EINVAL;
+ if (flags & ~BPF_TC_F_REPLACE)
+ return -EINVAL;
+ if (!flush) {
+ if (!handle || !priority)
+ return -EINVAL;
+ protocol = ETH_P_ALL;
+ } else {
+ if (handle || priority)
+ return -EINVAL;
+ }
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_type = RTM_DELTFILTER;
+ req.tc.tcm_family = AF_UNSPEC;
+ req.tc.tcm_ifindex = ifindex;
+ if (!flush) {
+ req.tc.tcm_handle = handle;
+ req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol));
+ }
+
+ ret = tc_get_tcm_parent(attach_point, &parent);
+ if (ret < 0)
+ return ret;
+ req.tc.tcm_parent = parent;
+
+ if (!flush) {
+ ret = nlattr_add(&req.nh, sizeof(req), TCA_KIND,
+ "bpf", sizeof("bpf"));
+ if (ret < 0)
+ return ret;
+ }
+
+ return libbpf_netlink_send_recv(&req.nh, NULL, NULL, NULL);
+}
+
+int bpf_tc_detach(const struct bpf_tc_hook *hook,
+ const struct bpf_tc_opts *opts)
+{
+ return !opts ? -EINVAL : __bpf_tc_detach(hook, opts, false);
+}
+
+int bpf_tc_query(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
+{
+ __u32 protocol, handle, priority, parent, prog_id, flags;
+ int ret, ifindex, attach_point, prog_fd;
+ struct bpf_cb_ctx info = {};
+ struct {
+ struct nlmsghdr nh;
+ struct tcmsg tc;
+ char buf[256];
+ } req;
+
+ if (!hook || !opts ||
+ !OPTS_VALID(hook, bpf_tc_hook) ||
+ !OPTS_VALID(opts, bpf_tc_opts))
+ return -EINVAL;
+
+ ifindex = OPTS_GET(hook, ifindex, 0);
+ parent = OPTS_GET(hook, parent, 0);
+ attach_point = OPTS_GET(hook, attach_point, 0);
+
+ handle = OPTS_GET(opts, handle, 0);
+ priority = OPTS_GET(opts, priority, 0);
+ prog_fd = OPTS_GET(opts, prog_fd, 0);
+ prog_id = OPTS_GET(opts, prog_id, 0);
+ flags = OPTS_GET(opts, flags, 0);
+
+ if (ifindex <= 0 || flags || prog_fd || prog_id ||
+ !handle || !priority)
+ return -EINVAL;
+ if (priority > UINT16_MAX)
+ return -EINVAL;
+
+ protocol = ETH_P_ALL;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST;
+ req.nh.nlmsg_type = RTM_GETTFILTER;
+ req.tc.tcm_family = AF_UNSPEC;
+ req.tc.tcm_ifindex = ifindex;
+ req.tc.tcm_handle = handle;
+ req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol));
+
+ ret = tc_get_tcm_parent(attach_point, &parent);
+ if (ret < 0)
+ return ret;
+ req.tc.tcm_parent = parent;
+
+ ret = nlattr_add(&req.nh, sizeof(req), TCA_KIND, "bpf", sizeof("bpf"));
+ if (ret < 0)
+ return ret;
+
+ info.opts = opts;
+
+ ret = libbpf_netlink_send_recv(&req.nh, get_tc_info, NULL, &info);
+ if (ret < 0)
+ return ret;
+ if (!info.processed)
+ return -ENOENT;
+ return ret;
}
diff --git a/tools/lib/bpf/nlattr.h b/tools/lib/bpf/nlattr.h
index 6cc3ac91690f..3c780ab6d022 100644
--- a/tools/lib/bpf/nlattr.h
+++ b/tools/lib/bpf/nlattr.h
@@ -10,7 +10,10 @@
#define __LIBBPF_NLATTR_H
#include <stdint.h>
+#include <string.h>
+#include <errno.h>
#include <linux/netlink.h>
+
/* avoid multiple definition of netlink features */
#define __LINUX_NETLINK_H
@@ -103,4 +106,49 @@ int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype,
int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh);
+static inline struct nlattr *nla_data(struct nlattr *nla)
+{
+ return (struct nlattr *)((char *)nla + NLA_HDRLEN);
+}
+
+static inline struct nlattr *nh_tail(struct nlmsghdr *nh)
+{
+ return (struct nlattr *)((char *)nh + NLMSG_ALIGN(nh->nlmsg_len));
+}
+
+static inline int nlattr_add(struct nlmsghdr *nh, size_t maxsz, int type,
+ const void *data, int len)
+{
+ struct nlattr *nla;
+
+ if (NLMSG_ALIGN(nh->nlmsg_len) + NLA_ALIGN(NLA_HDRLEN + len) > maxsz)
+ return -EMSGSIZE;
+ if (!!data != !!len)
+ return -EINVAL;
+
+ nla = nh_tail(nh);
+ nla->nla_type = type;
+ nla->nla_len = NLA_HDRLEN + len;
+ if (data)
+ memcpy(nla_data(nla), data, len);
+ nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + NLA_ALIGN(nla->nla_len);
+ return 0;
+}
+
+static inline struct nlattr *nlattr_begin_nested(struct nlmsghdr *nh,
+ size_t maxsz, int type)
+{
+ struct nlattr *tail;
+
+ tail = nh_tail(nh);
+ if (nlattr_add(nh, maxsz, type | NLA_F_NESTED, NULL, 0))
+ return NULL;
+ return tail;
+}
+
+static inline void nlattr_end_nested(struct nlmsghdr *nh, struct nlattr *tail)
+{
+ tail->nla_len = (char *)nh_tail(nh) - (char *)tail;
+}
+
#endif /* __LIBBPF_NLATTR_H */
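These helpers compose netlink TLVs in place; the nested IFLA_XDP attribute in the rewritten netlink.c, for example, is built roughly like this (condensed from the code above):

	struct nlattr *nla = nlattr_begin_nested(&req.nh, sizeof(req), IFLA_XDP);

	if (!nla)
		return -EMSGSIZE;
	if (nlattr_add(&req.nh, sizeof(req), IFLA_XDP_FD, &fd, sizeof(fd)))
		return -EMSGSIZE;
	nlattr_end_nested(&req.nh, nla);	/* fixes nla_len to span the payload */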
diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h
new file mode 100644
index 000000000000..b22b50c1b173
--- /dev/null
+++ b/tools/lib/bpf/skel_internal.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2021 Facebook */
+#ifndef __SKEL_INTERNAL_H
+#define __SKEL_INTERNAL_H
+
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/mman.h>
+
+/* This file is a base header for auto-generated *.lskel.h files.
+ * Its contents will change and may become part of auto-generation in the future.
+ *
+ * The layout of bpf_[map|prog]_desc and bpf_loader_ctx is feature dependent
+ * and will change from one version of libbpf to another, depending on
+ * the features requested during loader program generation.
+ */
+struct bpf_map_desc {
+ union {
+ /* input for the loader prog */
+ struct {
+ __aligned_u64 initial_value;
+ __u32 max_entries;
+ };
+ /* output of the loader prog */
+ struct {
+ int map_fd;
+ };
+ };
+};
+struct bpf_prog_desc {
+ int prog_fd;
+};
+
+struct bpf_loader_ctx {
+ size_t sz;
+ __u32 log_level;
+ __u32 log_size;
+ __u64 log_buf;
+};
+
+struct bpf_load_and_run_opts {
+ struct bpf_loader_ctx *ctx;
+ const void *data;
+ const void *insns;
+ __u32 data_sz;
+ __u32 insns_sz;
+ const char *errstr;
+};
+
+static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
+ unsigned int size)
+{
+ return syscall(__NR_bpf, cmd, attr, size);
+}
+
+static inline int skel_closenz(int fd)
+{
+ if (fd > 0)
+ return close(fd);
+ return -EINVAL;
+}
+
+static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
+{
+ int map_fd = -1, prog_fd = -1, key = 0, err;
+ union bpf_attr attr;
+
+ map_fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "__loader.map", 4,
+ opts->data_sz, 1, 0);
+ if (map_fd < 0) {
+ opts->errstr = "failed to create loader map";
+ err = -errno;
+ goto out;
+ }
+
+ err = bpf_map_update_elem(map_fd, &key, opts->data, 0);
+ if (err < 0) {
+ opts->errstr = "failed to update loader map";
+ err = -errno;
+ goto out;
+ }
+
+ memset(&attr, 0, sizeof(attr));
+ attr.prog_type = BPF_PROG_TYPE_SYSCALL;
+ attr.insns = (long) opts->insns;
+ attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
+ attr.license = (long) "Dual BSD/GPL";
+ memcpy(attr.prog_name, "__loader.prog", sizeof("__loader.prog"));
+ attr.fd_array = (long) &map_fd;
+ attr.log_level = opts->ctx->log_level;
+ attr.log_size = opts->ctx->log_size;
+ attr.log_buf = opts->ctx->log_buf;
+ attr.prog_flags = BPF_F_SLEEPABLE;
+ prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+ if (prog_fd < 0) {
+ opts->errstr = "failed to load loader prog";
+ err = -errno;
+ goto out;
+ }
+
+ memset(&attr, 0, sizeof(attr));
+ attr.test.prog_fd = prog_fd;
+ attr.test.ctx_in = (long) opts->ctx;
+ attr.test.ctx_size_in = opts->ctx->sz;
+ err = skel_sys_bpf(BPF_PROG_RUN, &attr, sizeof(attr));
+ if (err < 0 || (int)attr.test.retval < 0) {
+ opts->errstr = "failed to execute loader prog";
+ if (err < 0)
+ err = -errno;
+ else
+ err = (int)attr.test.retval;
+ goto out;
+ }
+ err = 0;
+out:
+ if (map_fd >= 0)
+ close(map_fd);
+ if (prog_fd >= 0)
+ close(prog_fd);
+ return err;
+}
+
+#endif
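For orientation, a generated *.lskel.h is expected to drive this roughly as follows (sketch only; the example__* names and blob layout are illustrative, not the exact generated code):

	static inline int example__load(struct example *skel)
	{
		struct bpf_load_and_run_opts opts = {};

		opts.ctx = (struct bpf_loader_ctx *)skel; /* skel starts with ctx */
		opts.data = example_data;                 /* embedded maps/BTF blob */
		opts.data_sz = sizeof(example_data);
		opts.insns = example_loader_insns;        /* loader prog insns */
		opts.insns_sz = sizeof(example_loader_insns);
		return bpf_load_and_run(&opts);
	}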
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 4866f6a21901..a030aa4a8a9e 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -30,6 +30,7 @@ test_sysctl
xdping
test_cpp
*.skel.h
+*.lskel.h
/no_alu32
/bpf_gcc
/tools
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 511259c2c6c5..525e4b3fb514 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -312,6 +312,10 @@ SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
linked_vars.skel.h linked_maps.skel.h
+LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
+ test_ksyms_module.c test_ringbuf.c atomics.c trace_printk.c
+SKEL_BLACKLIST += $$(LSKELS)
+
test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o
linked_funcs.skel.h-deps := linked_funcs1.o linked_funcs2.o
linked_vars.skel.h-deps := linked_vars1.o linked_vars2.o
@@ -339,6 +343,7 @@ TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, $$(TRUNNER_BPF_SRCS)
TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h, \
$$(filter-out $(SKEL_BLACKLIST) $(LINKED_BPF_SRCS),\
$$(TRUNNER_BPF_SRCS)))
+TRUNNER_BPF_LSKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.lskel.h, $$(LSKELS))
TRUNNER_BPF_SKELS_LINKED := $$(addprefix $$(TRUNNER_OUTPUT)/,$(LINKED_SKELS))
TEST_GEN_FILES += $$(TRUNNER_BPF_OBJS)
@@ -380,6 +385,14 @@ $(TRUNNER_BPF_SKELS): %.skel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
$(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@
+$(TRUNNER_BPF_LSKELS): %.lskel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
+ $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked1.o) $$<
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked2.o) $$(<:.o=.linked1.o)
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked3.o) $$(<:.o=.linked2.o)
+ $(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
+ $(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@
+
$(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.o))
$(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked1.o) $$(addprefix $(TRUNNER_OUTPUT)/,$$($$(@F)-deps))
@@ -409,6 +422,7 @@ $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \
$(TRUNNER_EXTRA_HDRS) \
$(TRUNNER_BPF_OBJS) \
$(TRUNNER_BPF_SKELS) \
+ $(TRUNNER_BPF_LSKELS) \
$(TRUNNER_BPF_SKELS_LINKED) \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
$$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@)
@@ -516,6 +530,6 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature \
- $(addprefix $(OUTPUT)/,*.o *.skel.h no_alu32 bpf_gcc bpf_testmod.ko)
+ $(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h no_alu32 bpf_gcc bpf_testmod.ko)
.PHONY: docs docs-clean
diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c
index 21efe7bbf10d..ba0e1efe5a45 100644
--- a/tools/testing/selftests/bpf/prog_tests/atomics.c
+++ b/tools/testing/selftests/bpf/prog_tests/atomics.c
@@ -2,19 +2,19 @@
#include <test_progs.h>
-#include "atomics.skel.h"
+#include "atomics.lskel.h"
static void test_add(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
- link = bpf_program__attach(skel->progs.add);
- if (CHECK(IS_ERR(link), "attach(add)", "err: %ld\n", PTR_ERR(link)))
+ link_fd = atomics__add__attach(skel);
+ if (!ASSERT_GT(link_fd, 0, "attach(add)"))
return;
- prog_fd = bpf_program__fd(skel->progs.add);
+ prog_fd = skel->progs.add.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run add",
@@ -33,20 +33,20 @@ static void test_add(struct atomics *skel)
ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value");
cleanup:
- bpf_link__destroy(link);
+ close(link_fd);
}
static void test_sub(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
- link = bpf_program__attach(skel->progs.sub);
- if (CHECK(IS_ERR(link), "attach(sub)", "err: %ld\n", PTR_ERR(link)))
+ link_fd = atomics__sub__attach(skel);
+ if (!ASSERT_GT(link_fd, 0, "attach(sub)"))
return;
- prog_fd = bpf_program__fd(skel->progs.sub);
+ prog_fd = skel->progs.sub.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run sub",
@@ -66,20 +66,20 @@ static void test_sub(struct atomics *skel)
ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value");
cleanup:
- bpf_link__destroy(link);
+ close(link_fd);
}
static void test_and(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
- link = bpf_program__attach(skel->progs.and);
- if (CHECK(IS_ERR(link), "attach(and)", "err: %ld\n", PTR_ERR(link)))
+ link_fd = atomics__and__attach(skel);
+ if (!ASSERT_GT(link_fd, 0, "attach(and)"))
return;
- prog_fd = bpf_program__fd(skel->progs.and);
+ prog_fd = skel->progs.and.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run and",
@@ -94,20 +94,20 @@ static void test_and(struct atomics *skel)
ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value");
cleanup:
- bpf_link__destroy(link);
+ close(link_fd);
}
static void test_or(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
- link = bpf_program__attach(skel->progs.or);
- if (CHECK(IS_ERR(link), "attach(or)", "err: %ld\n", PTR_ERR(link)))
+ link_fd = atomics__or__attach(skel);
+ if (!ASSERT_GT(link_fd, 0, "attach(or)"))
return;
- prog_fd = bpf_program__fd(skel->progs.or);
+ prog_fd = skel->progs.or.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run or",
@@ -123,20 +123,20 @@ static void test_or(struct atomics *skel)
ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value");
cleanup:
- bpf_link__destroy(link);
+ close(link_fd);
}
static void test_xor(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
- link = bpf_program__attach(skel->progs.xor);
- if (CHECK(IS_ERR(link), "attach(xor)", "err: %ld\n", PTR_ERR(link)))
+ link_fd = atomics__xor__attach(skel);
+ if (!ASSERT_GT(link_fd, 0, "attach(xor)"))
return;
- prog_fd = bpf_program__fd(skel->progs.xor);
+ prog_fd = skel->progs.xor.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run xor",
@@ -151,20 +151,20 @@ static void test_xor(struct atomics *skel)
ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_nxoreturn_value");
cleanup:
- bpf_link__destroy(link);
+ close(link_fd);
}
static void test_cmpxchg(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
- link = bpf_program__attach(skel->progs.cmpxchg);
- if (CHECK(IS_ERR(link), "attach(cmpxchg)", "err: %ld\n", PTR_ERR(link)))
+ link_fd = atomics__cmpxchg__attach(skel);
+ if (!ASSERT_GT(link_fd, 0, "attach(cmpxchg)"))
return;
- prog_fd = bpf_program__fd(skel->progs.cmpxchg);
+ prog_fd = skel->progs.cmpxchg.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run add",
@@ -180,20 +180,20 @@ static void test_cmpxchg(struct atomics *skel)
ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed");
cleanup:
- bpf_link__destroy(link);
+ close(link_fd);
}
static void test_xchg(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
- link = bpf_program__attach(skel->progs.xchg);
- if (CHECK(IS_ERR(link), "attach(xchg)", "err: %ld\n", PTR_ERR(link)))
+ link_fd = atomics__xchg__attach(skel);
+ if (!ASSERT_GT(link_fd, 0, "attach(xchg)"))
return;
- prog_fd = bpf_program__fd(skel->progs.xchg);
+ prog_fd = skel->progs.xchg.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run add",
@@ -207,7 +207,7 @@ static void test_xchg(struct atomics *skel)
ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result");
cleanup:
- bpf_link__destroy(link);
+ close(link_fd);
}
void test_atomics(void)
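The atomics conversion above is the template for every lskel user in this series: the generated <skel>__<prog>__attach() helpers hand back a raw link fd rather than a struct bpf_link *, program fds are plain struct fields, and detaching is just a close(). A minimal sketch of that life cycle, assuming the atomics lskel generated by this series (the run_add() wrapper is illustrative; error paths trimmed):

#include <unistd.h>
#include "atomics.lskel.h"

static int run_add(void)
{
	struct atomics *skel;
	int link_fd, err = -1;

	skel = atomics__open_and_load();
	if (!skel)
		return -1;

	link_fd = atomics__add__attach(skel);	/* raw link fd, not a bpf_link * */
	if (link_fd < 0)
		goto out;

	/* prog fd is a plain field; no bpf_program__fd() accessor needed */
	err = bpf_prog_test_run(skel->progs.add.prog_fd, 1, NULL, 0,
				NULL, NULL, NULL, NULL);

	close(link_fd);				/* detach == close the fd */
out:
	atomics__destroy(skel);
	return err;
}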
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
index 109d0345a2be..91154c2ba256 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
-#include "fentry_test.skel.h"
-#include "fexit_test.skel.h"
+#include "fentry_test.lskel.h"
+#include "fexit_test.lskel.h"
void test_fentry_fexit(void)
{
@@ -26,7 +26,7 @@ void test_fentry_fexit(void)
if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
goto close_prog;
- prog_fd = bpf_program__fd(fexit_skel->progs.test1);
+ prog_fd = fexit_skel->progs.test1.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
CHECK(err || retval, "ipv6",
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
index 7cb111b11995..174c89e7456e 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
@@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
-#include "fentry_test.skel.h"
+#include "fentry_test.lskel.h"
static int fentry_test(struct fentry_test *fentry_skel)
{
int err, prog_fd, i;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
__u64 *result;
err = fentry_test__attach(fentry_skel);
@@ -15,11 +15,11 @@ static int fentry_test(struct fentry_test *fentry_skel)
return err;
/* Check that already linked program can't be attached again. */
- link = bpf_program__attach(fentry_skel->progs.test1);
- if (!ASSERT_ERR_PTR(link, "fentry_attach_link"))
+ link_fd = fentry_test__test1__attach(fentry_skel);
+ if (!ASSERT_LT(link_fd, 0, "fentry_attach_link"))
return -1;
- prog_fd = bpf_program__fd(fentry_skel->progs.test1);
+ prog_fd = fentry_skel->progs.test1.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
ASSERT_OK(err, "test_run");
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
index ccc7e8a34ab6..4e7f4b42ea29 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
@@ -6,7 +6,7 @@
#include <time.h>
#include <sys/mman.h>
#include <sys/syscall.h>
-#include "fexit_sleep.skel.h"
+#include "fexit_sleep.lskel.h"
static int do_sleep(void *skel)
{
@@ -58,8 +58,8 @@ void test_fexit_sleep(void)
* waiting for percpu_ref_kill to confirm). The other one
* will be freed quickly.
*/
- close(bpf_program__fd(fexit_skel->progs.nanosleep_fentry));
- close(bpf_program__fd(fexit_skel->progs.nanosleep_fexit));
+ close(fexit_skel->progs.nanosleep_fentry.prog_fd);
+ close(fexit_skel->progs.nanosleep_fexit.prog_fd);
fexit_sleep__detach(fexit_skel);
/* kill the thread to unwind sys_nanosleep stack through the trampoline */
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
index 6792e41f7f69..af3dba726701 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
@@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
-#include "fexit_test.skel.h"
+#include "fexit_test.lskel.h"
static int fexit_test(struct fexit_test *fexit_skel)
{
int err, prog_fd, i;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
__u64 *result;
err = fexit_test__attach(fexit_skel);
@@ -15,11 +15,11 @@ static int fexit_test(struct fexit_test *fexit_skel)
return err;
/* Check that already linked program can't be attached again. */
- link = bpf_program__attach(fexit_skel->progs.test1);
- if (!ASSERT_ERR_PTR(link, "fexit_attach_link"))
+ link_fd = fexit_test__test1__attach(fexit_skel);
+ if (!ASSERT_LT(link_fd, 0, "fexit_attach_link"))
return -1;
- prog_fd = bpf_program__fd(fexit_skel->progs.test1);
+ prog_fd = fexit_skel->progs.test1.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
ASSERT_OK(err, "test_run");
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
index 7fc0951ee75f..30a7b9b837bf 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
-#include "kfunc_call_test.skel.h"
+#include "kfunc_call_test.lskel.h"
#include "kfunc_call_test_subprog.skel.h"
static void test_main(void)
@@ -14,13 +14,13 @@ static void test_main(void)
if (!ASSERT_OK_PTR(skel, "skel"))
return;
- prog_fd = bpf_program__fd(skel->progs.kfunc_call_test1);
+ prog_fd = skel->progs.kfunc_call_test1.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
NULL, NULL, (__u32 *)&retval, NULL);
ASSERT_OK(err, "bpf_prog_test_run(test1)");
ASSERT_EQ(retval, 12, "test1-retval");
- prog_fd = bpf_program__fd(skel->progs.kfunc_call_test2);
+ prog_fd = skel->progs.kfunc_call_test2.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
NULL, NULL, (__u32 *)&retval, NULL);
ASSERT_OK(err, "bpf_prog_test_run(test2)");
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c
index 4c232b456479..2cd5cded543f 100644
--- a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c
+++ b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c
@@ -4,7 +4,7 @@
#include <test_progs.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
-#include "test_ksyms_module.skel.h"
+#include "test_ksyms_module.lskel.h"
static int duration;
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
index f9a8ae331963..a01788090c31 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
@@ -12,7 +12,7 @@
#include <sys/sysinfo.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
-#include "test_ringbuf.skel.h"
+#include "test_ringbuf.lskel.h"
#define EDONE 7777
@@ -94,9 +94,7 @@ void test_ringbuf(void)
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
- err = bpf_map__set_max_entries(skel->maps.ringbuf, page_size);
- if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
- goto cleanup;
+ skel->maps.ringbuf.max_entries = page_size;
err = test_ringbuf__load(skel);
if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
@@ -151,7 +149,7 @@ void test_ringbuf(void)
/* only trigger BPF program for current process */
skel->bss->pid = getpid();
- ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf),
+ ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd,
process_sample, NULL, NULL);
if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
goto cleanup;
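One detail worth calling out in the ringbuf conversion: with an lskel there is no bpf_map__set_max_entries() round trip, because map attributes are plain fields that can be assigned between open and load, and the map fd is read back the same way. A hedged sketch of that open/tune/load sequence (open_sized_ringbuf() is an illustrative wrapper):

#include "test_ringbuf.lskel.h"

static struct test_ringbuf *open_sized_ringbuf(long page_size)
{
	struct test_ringbuf *skel = test_ringbuf__open();

	if (!skel)
		return NULL;
	/* tune before load; ring buffer size must be a page-size multiple */
	skel->maps.ringbuf.max_entries = page_size;
	if (test_ringbuf__load(skel)) {
		test_ringbuf__destroy(skel);
		return NULL;
	}
	/* skel->maps.ringbuf.map_fd is now a live fd for ring_buffer__new() */
	return skel;
}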
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index 7043e6ded0e6..a1eade51d440 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -2,7 +2,7 @@
#include <test_progs.h>
#include "test_send_signal_kern.skel.h"
-static volatile int sigusr1_received = 0;
+int sigusr1_received = 0;
static void sigusr1_handler(int signum)
{
diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c
index fe87b77af459..f6f130c99b8c 100644
--- a/tools/testing/selftests/bpf/prog_tests/skeleton.c
+++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c
@@ -82,10 +82,8 @@ void test_skeleton(void)
CHECK(data->out2 != 2, "res2", "got %lld != exp %d\n", data->out2, 2);
CHECK(bss->out3 != 3, "res3", "got %d != exp %d\n", (int)bss->out3, 3);
CHECK(bss->out4 != 4, "res4", "got %lld != exp %d\n", bss->out4, 4);
- CHECK(bss->handler_out5.a != 5, "res5", "got %d != exp %d\n",
- bss->handler_out5.a, 5);
- CHECK(bss->handler_out5.b != 6, "res6", "got %lld != exp %d\n",
- bss->handler_out5.b, 6);
+ CHECK(bss->out5.a != 5, "res5", "got %d != exp %d\n", bss->out5.a, 5);
+ CHECK(bss->out5.b != 6, "res6", "got %lld != exp %d\n", bss->out5.b, 6);
CHECK(bss->out6 != 14, "res7", "got %d != exp %d\n", bss->out6, 14);
CHECK(bss->bpf_syscall != kcfg->CONFIG_BPF_SYSCALL, "ext1",
diff --git a/tools/testing/selftests/bpf/prog_tests/static_linked.c b/tools/testing/selftests/bpf/prog_tests/static_linked.c
index 46556976dccc..5c4e3014e063 100644
--- a/tools/testing/selftests/bpf/prog_tests/static_linked.c
+++ b/tools/testing/selftests/bpf/prog_tests/static_linked.c
@@ -14,12 +14,7 @@ void test_static_linked(void)
return;
skel->rodata->rovar1 = 1;
- skel->bss->static_var1 = 2;
- skel->bss->static_var11 = 3;
-
skel->rodata->rovar2 = 4;
- skel->bss->static_var2 = 5;
- skel->bss->static_var22 = 6;
err = test_static_linked__load(skel);
if (!ASSERT_OK(err, "skel_load"))
@@ -32,8 +27,8 @@ void test_static_linked(void)
/* trigger */
usleep(1);
- ASSERT_EQ(skel->bss->var1, 1 * 2 + 2 + 3, "var1");
- ASSERT_EQ(skel->bss->var2, 4 * 3 + 5 + 6, "var2");
+ ASSERT_EQ(skel->data->var1, 1 * 2 + 2 + 3, "var1");
+ ASSERT_EQ(skel->data->var2, 4 * 3 + 5 + 6, "var2");
cleanup:
test_static_linked__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/syscall.c b/tools/testing/selftests/bpf/prog_tests/syscall.c
new file mode 100644
index 000000000000..81e997a69f7a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/syscall.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include "syscall.skel.h"
+
+struct args {
+ __u64 log_buf;
+ __u32 log_size;
+ int max_entries;
+ int map_fd;
+ int prog_fd;
+ int btf_fd;
+};
+
+void test_syscall(void)
+{
+ static char verifier_log[8192];
+ struct args ctx = {
+ .max_entries = 1024,
+ .log_buf = (uintptr_t) verifier_log,
+ .log_size = sizeof(verifier_log),
+ };
+ struct bpf_prog_test_run_attr tattr = {
+ .ctx_in = &ctx,
+ .ctx_size_in = sizeof(ctx),
+ };
+ struct syscall *skel = NULL;
+ __u64 key = 12, value = 0;
+ int err;
+
+ skel = syscall__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ tattr.prog_fd = bpf_program__fd(skel->progs.bpf_prog);
+ err = bpf_prog_test_run_xattr(&tattr);
+ ASSERT_EQ(err, 0, "err");
+ ASSERT_EQ(tattr.retval, 1, "retval");
+ ASSERT_GT(ctx.map_fd, 0, "ctx.map_fd");
+ ASSERT_GT(ctx.prog_fd, 0, "ctx.prog_fd");
+ ASSERT_OK(memcmp(verifier_log, "processed", sizeof("processed") - 1),
+ "verifier_log");
+
+ err = bpf_map_lookup_elem(ctx.map_fd, &key, &value);
+ ASSERT_EQ(err, 0, "map_lookup");
+ ASSERT_EQ(value, 34, "map lookup value");
+cleanup:
+ syscall__destroy(skel);
+ if (ctx.prog_fd > 0)
+ close(ctx.prog_fd);
+ if (ctx.map_fd > 0)
+ close(ctx.map_fd);
+ if (ctx.btf_fd > 0)
+ close(ctx.btf_fd);
+}
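The ctx round trip is the mechanism that makes this test work: bpf_prog_test_run_xattr() copies struct args in as the syscall program's context and copies it back after the run, which is how fds created inside the kernel become visible here. Reduced to that core (a sketch against the same skeleton; run_loader_prog() is illustrative):

static int run_loader_prog(struct syscall *skel, struct args *ctx)
{
	struct bpf_prog_test_run_attr tattr = {
		.prog_fd = bpf_program__fd(skel->progs.bpf_prog),
		.ctx_in = ctx,			/* copied in as program context */
		.ctx_size_in = sizeof(*ctx),	/* and copied back on return */
	};
	int err = bpf_prog_test_run_xattr(&tattr);

	/* on success, ctx->btf_fd/map_fd/prog_fd hold fds the program made */
	return err ?: (tattr.retval == 1 ? 0 : -1);
}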
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c
new file mode 100644
index 000000000000..4a505a5adf4d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include <linux/pkt_cls.h>
+
+#include "test_tc_bpf.skel.h"
+
+#define LO_IFINDEX 1
+
+#define TEST_DECLARE_OPTS(__fd) \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_h, .handle = 1); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_p, .priority = 1); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_f, .prog_fd = __fd); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hp, .handle = 1, .priority = 1); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hf, .handle = 1, .prog_fd = __fd); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_pf, .priority = 1, .prog_fd = __fd); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpf, .handle = 1, .priority = 1, .prog_fd = __fd); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpi, .handle = 1, .priority = 1, .prog_id = 42); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpr, .handle = 1, .priority = 1, \
+ .flags = BPF_TC_F_REPLACE); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpfi, .handle = 1, .priority = 1, .prog_fd = __fd, \
+ .prog_id = 42); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_prio_max, .handle = 1, .priority = UINT16_MAX + 1);
+
+static int test_tc_bpf_basic(const struct bpf_tc_hook *hook, int fd)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, .prog_fd = fd);
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int ret;
+
+ ret = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+ if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd"))
+ return ret;
+
+ ret = bpf_tc_attach(hook, &opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach"))
+ return ret;
+
+ if (!ASSERT_EQ(opts.handle, 1, "handle set") ||
+ !ASSERT_EQ(opts.priority, 1, "priority set") ||
+ !ASSERT_EQ(opts.prog_id, info.id, "prog_id set"))
+ goto end;
+
+ opts.prog_id = 0;
+ opts.flags = BPF_TC_F_REPLACE;
+ ret = bpf_tc_attach(hook, &opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach replace mode"))
+ goto end;
+
+ opts.flags = opts.prog_fd = opts.prog_id = 0;
+ ret = bpf_tc_query(hook, &opts);
+ if (!ASSERT_OK(ret, "bpf_tc_query"))
+ goto end;
+
+ if (!ASSERT_EQ(opts.handle, 1, "handle set") ||
+ !ASSERT_EQ(opts.priority, 1, "priority set") ||
+ !ASSERT_EQ(opts.prog_id, info.id, "prog_id set"))
+ goto end;
+
+end:
+ opts.flags = opts.prog_fd = opts.prog_id = 0;
+ ret = bpf_tc_detach(hook, &opts);
+ ASSERT_OK(ret, "bpf_tc_detach");
+ return ret;
+}
+
+static int test_tc_bpf_api(struct bpf_tc_hook *hook, int fd)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, attach_opts, .handle = 1, .priority = 1, .prog_fd = fd);
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, inv_hook, .attach_point = BPF_TC_INGRESS);
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1);
+ int ret;
+
+ ret = bpf_tc_hook_create(NULL);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook = NULL"))
+ return -EINVAL;
+
+ /* hook ifindex = 0 */
+ ret = bpf_tc_hook_create(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook ifindex == 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_hook_destroy(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook ifindex == 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(&inv_hook, &attach_opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook ifindex == 0"))
+ return -EINVAL;
+ attach_opts.prog_id = 0;
+
+ ret = bpf_tc_detach(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook ifindex == 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook ifindex == 0"))
+ return -EINVAL;
+
+ /* hook ifindex < 0 */
+ inv_hook.ifindex = -1;
+
+ ret = bpf_tc_hook_create(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook ifindex < 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_hook_destroy(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook ifindex < 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(&inv_hook, &attach_opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook ifindex < 0"))
+ return -EINVAL;
+ attach_opts.prog_id = 0;
+
+ ret = bpf_tc_detach(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook ifindex < 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook ifindex < 0"))
+ return -EINVAL;
+
+ inv_hook.ifindex = LO_IFINDEX;
+
+ /* hook.attach_point invalid */
+ inv_hook.attach_point = 0xabcd;
+ ret = bpf_tc_hook_create(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook.attach_point"))
+ return -EINVAL;
+
+ ret = bpf_tc_hook_destroy(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook.attach_point"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(&inv_hook, &attach_opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook.attach_point"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook.attach_point"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook.attach_point"))
+ return -EINVAL;
+
+ inv_hook.attach_point = BPF_TC_INGRESS;
+
+ /* hook.attach_point valid, but parent invalid */
+ inv_hook.parent = TC_H_MAKE(1UL << 16, 10);
+ ret = bpf_tc_hook_create(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_hook_destroy(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(&inv_hook, &attach_opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook parent"))
+ return -EINVAL;
+
+ inv_hook.attach_point = BPF_TC_CUSTOM;
+ inv_hook.parent = 0;
+ /* These return EOPNOTSUPP instead of EINVAL because the parent is
+ * validated after the hook's attach_point.
+ */
+ ret = bpf_tc_hook_create(&inv_hook);
+ if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_create invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_hook_destroy(&inv_hook);
+ if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_destroy invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(&inv_hook, &attach_opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook parent"))
+ return -EINVAL;
+
+ inv_hook.attach_point = BPF_TC_INGRESS;
+
+ /* detach */
+ {
+ TEST_DECLARE_OPTS(fd);
+
+ ret = bpf_tc_detach(NULL, &opts_hp);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook = NULL"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, NULL);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid opts = NULL"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_hpr);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid flags set"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_hpf);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid prog_fd set"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_hpi);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid prog_id set"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_p);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid handle unset"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_h);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid priority unset"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_prio_max);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid priority > UINT16_MAX"))
+ return -EINVAL;
+ }
+
+ /* query */
+ {
+ TEST_DECLARE_OPTS(fd);
+
+ ret = bpf_tc_query(NULL, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook = NULL"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, NULL);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid opts = NULL"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_hpr);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid flags set"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_hpf);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid prog_fd set"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_hpi);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid prog_id set"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_p);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid handle unset"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_h);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid priority unset"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_prio_max);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid priority > UINT16_MAX"))
+ return -EINVAL;
+
+ /* when chain is not present, kernel returns -EINVAL */
+ ret = bpf_tc_query(hook, &opts_hp);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query valid handle, priority set"))
+ return -EINVAL;
+ }
+
+ /* attach */
+ {
+ TEST_DECLARE_OPTS(fd);
+
+ ret = bpf_tc_attach(NULL, &opts_hp);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook = NULL"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(hook, NULL);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid opts = NULL"))
+ return -EINVAL;
+
+ opts_hp.flags = 42;
+ ret = bpf_tc_attach(hook, &opts_hp);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid flags"))
+ return -EINVAL;
+
+ opts_hp.flags = 0;
+ ret = bpf_tc_attach(hook, &opts_hp);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid prog_fd unset"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(hook, &opts_hpi);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid prog_id set"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(hook, &opts_pf);
+ if (!ASSERT_OK(ret, "bpf_tc_attach valid handle unset"))
+ return -EINVAL;
+ opts_pf.prog_fd = opts_pf.prog_id = 0;
+ ASSERT_OK(bpf_tc_detach(hook, &opts_pf), "bpf_tc_detach");
+
+ ret = bpf_tc_attach(hook, &opts_hf);
+ if (!ASSERT_OK(ret, "bpf_tc_attach valid priority unset"))
+ return -EINVAL;
+ opts_hf.prog_fd = opts_hf.prog_id = 0;
+ ASSERT_OK(bpf_tc_detach(hook, &opts_hf), "bpf_tc_detach");
+
+ ret = bpf_tc_attach(hook, &opts_prio_max);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid priority > UINT16_MAX"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(hook, &opts_f);
+ if (!ASSERT_OK(ret, "bpf_tc_attach valid both handle and priority unset"))
+ return -EINVAL;
+ opts_f.prog_fd = opts_f.prog_id = 0;
+ ASSERT_OK(bpf_tc_detach(hook, &opts_f), "bpf_tc_detach");
+ }
+
+ return 0;
+}
+
+void test_tc_bpf(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = LO_IFINDEX,
+ .attach_point = BPF_TC_INGRESS);
+ struct test_tc_bpf *skel = NULL;
+ bool hook_created = false;
+ int cls_fd, ret;
+
+ skel = test_tc_bpf__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tc_bpf__open_and_load"))
+ return;
+
+ cls_fd = bpf_program__fd(skel->progs.cls);
+
+ ret = bpf_tc_hook_create(&hook);
+ if (ret == 0)
+ hook_created = true;
+
+ ret = ret == -EEXIST ? 0 : ret;
+ if (!ASSERT_OK(ret, "bpf_tc_hook_create(BPF_TC_INGRESS)"))
+ goto end;
+
+ hook.attach_point = BPF_TC_CUSTOM;
+ hook.parent = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
+ ret = bpf_tc_hook_create(&hook);
+ if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_create invalid hook.attach_point"))
+ goto end;
+
+ ret = test_tc_bpf_basic(&hook, cls_fd);
+ if (!ASSERT_OK(ret, "test_tc_internal ingress"))
+ goto end;
+
+ ret = bpf_tc_hook_destroy(&hook);
+ if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_destroy invalid hook.attach_point"))
+ goto end;
+
+ hook.attach_point = BPF_TC_INGRESS;
+ hook.parent = 0;
+ bpf_tc_hook_destroy(&hook);
+
+ ret = test_tc_bpf_basic(&hook, cls_fd);
+ if (!ASSERT_OK(ret, "test_tc_internal ingress"))
+ goto end;
+
+ bpf_tc_hook_destroy(&hook);
+
+ hook.attach_point = BPF_TC_EGRESS;
+ ret = test_tc_bpf_basic(&hook, cls_fd);
+ if (!ASSERT_OK(ret, "test_tc_internal egress"))
+ goto end;
+
+ bpf_tc_hook_destroy(&hook);
+
+ ret = test_tc_bpf_api(&hook, cls_fd);
+ if (!ASSERT_OK(ret, "test_tc_bpf_api"))
+ goto end;
+
+ bpf_tc_hook_destroy(&hook);
+
+end:
+ if (hook_created) {
+ hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
+ bpf_tc_hook_destroy(&hook);
+ }
+ test_tc_bpf__destroy(skel);
+}
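Stripped of the negative-path coverage above, the new TC API boils down to three calls: create the clsact hook (tolerating -EEXIST), attach by handle/priority/prog_fd, and detach with prog_fd and prog_id cleared. A minimal happy-path sketch (tc_attach_detach_once() is illustrative and assumes an already loaded classifier fd):

#include <bpf/libbpf.h>

static int tc_attach_detach_once(int ifindex, int prog_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = ifindex,
			    .attach_point = BPF_TC_INGRESS);
	DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1,
			    .prog_fd = prog_fd);
	int ret;

	ret = bpf_tc_hook_create(&hook);
	if (ret && ret != -EEXIST)	/* -EEXIST: clsact already present */
		return ret;

	ret = bpf_tc_attach(&hook, &opts);	/* fills opts.prog_id on success */
	if (ret)
		return ret;

	opts.prog_fd = opts.prog_id = 0;	/* must be unset when detaching */
	return bpf_tc_detach(&hook, &opts);
}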
diff --git a/tools/testing/selftests/bpf/prog_tests/trace_printk.c b/tools/testing/selftests/bpf/prog_tests/trace_printk.c
index 39b0decb1bb2..d39bc00feb45 100644
--- a/tools/testing/selftests/bpf/prog_tests/trace_printk.c
+++ b/tools/testing/selftests/bpf/prog_tests/trace_printk.c
@@ -3,7 +3,7 @@
#include <test_progs.h>
-#include "trace_printk.skel.h"
+#include "trace_printk.lskel.h"
#define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe"
#define SEARCHMSG "testing,testing"
@@ -21,6 +21,9 @@ void test_trace_printk(void)
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
+ ASSERT_EQ(skel->rodata->fmt[0], 'T', "invalid printk fmt string");
+ skel->rodata->fmt[0] = 't';
+
err = trace_printk__load(skel);
if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
index ee49493dc125..400fdf8d6233 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
@@ -9,8 +9,8 @@ __u32 map1_id = 0, map2_id = 0;
__u32 map1_accessed = 0, map2_accessed = 0;
__u64 map1_seqnum = 0, map2_seqnum1 = 0, map2_seqnum2 = 0;
-static volatile const __u32 print_len;
-static volatile const __u32 ret1;
+volatile const __u32 print_len;
+volatile const __u32 ret1;
SEC("iter/bpf_map")
int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c
index a46a264ce24e..55e283050cab 100644
--- a/tools/testing/selftests/bpf/progs/kfree_skb.c
+++ b/tools/testing/selftests/bpf/progs/kfree_skb.c
@@ -109,10 +109,10 @@ int BPF_PROG(trace_kfree_skb, struct sk_buff *skb, void *location)
return 0;
}
-static volatile struct {
+struct {
bool fentry_test_ok;
bool fexit_test_ok;
-} result;
+} result = {};
SEC("fentry/eth_type_trans")
int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb, struct net_device *dev,
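The `static volatile` removals that recur through these program diffs all serve the same purpose: light skeletons (and libbpf's global-data maps generally) only expose non-static globals through the .data/.bss/.rodata sections, and the volatile qualifier was only ever a workaround to keep clang from optimizing the variables away. Schematically, on the BPF program side (illustrative variable name):

/* before: kept alive by volatile, invisible to the skeleton */
static volatile bool fentry_test_ok;

/* after: placed in .bss, addressable as skel->bss->fentry_test_ok */
bool fentry_test_ok = false;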
diff --git a/tools/testing/selftests/bpf/progs/linked_maps1.c b/tools/testing/selftests/bpf/progs/linked_maps1.c
index 52291515cc72..00bf1ca95986 100644
--- a/tools/testing/selftests/bpf/progs/linked_maps1.c
+++ b/tools/testing/selftests/bpf/progs/linked_maps1.c
@@ -75,7 +75,7 @@ int BPF_PROG(handler_exit1)
val = bpf_map_lookup_elem(&map_weak, &key);
if (val)
output_weak1 = *val;
-
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/syscall.c b/tools/testing/selftests/bpf/progs/syscall.c
new file mode 100644
index 000000000000..e550f728962d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/syscall.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <../../../tools/include/linux/filter.h>
+#include <linux/btf.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct args {
+ __u64 log_buf;
+ __u32 log_size;
+ int max_entries;
+ int map_fd;
+ int prog_fd;
+ int btf_fd;
+};
+
+#define BTF_INFO_ENC(kind, kind_flag, vlen) \
+ ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
+#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
+#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
+ ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
+#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
+ BTF_INT_ENC(encoding, bits_offset, bits)
+
+static int btf_load(void)
+{
+ struct btf_blob {
+ struct btf_header btf_hdr;
+ __u32 types[8];
+ __u32 str;
+ } raw_btf = {
+ .btf_hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_len = sizeof(__u32) * 8,
+ .str_off = sizeof(__u32) * 8,
+ .str_len = sizeof(__u32),
+ },
+ .types = {
+ /* long */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [1] */
+ /* unsigned long */
+ BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
+ },
+ };
+ static union bpf_attr btf_load_attr = {
+ .btf_size = sizeof(raw_btf),
+ };
+
+ btf_load_attr.btf = (long)&raw_btf;
+ return bpf_sys_bpf(BPF_BTF_LOAD, &btf_load_attr, sizeof(btf_load_attr));
+}
+
+SEC("syscall")
+int bpf_prog(struct args *ctx)
+{
+ static char license[] = "GPL";
+ static struct bpf_insn insns[] = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ static union bpf_attr map_create_attr = {
+ .map_type = BPF_MAP_TYPE_HASH,
+ .key_size = 8,
+ .value_size = 8,
+ .btf_key_type_id = 1,
+ .btf_value_type_id = 2,
+ };
+ static union bpf_attr map_update_attr = { .map_fd = 1, };
+ static __u64 key = 12;
+ static __u64 value = 34;
+ static union bpf_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .insn_cnt = sizeof(insns) / sizeof(insns[0]),
+ };
+ int ret;
+
+ ret = btf_load();
+ if (ret <= 0)
+ return ret;
+
+ ctx->btf_fd = ret;
+ map_create_attr.max_entries = ctx->max_entries;
+ map_create_attr.btf_fd = ret;
+
+ prog_load_attr.license = (long) license;
+ prog_load_attr.insns = (long) insns;
+ prog_load_attr.log_buf = ctx->log_buf;
+ prog_load_attr.log_size = ctx->log_size;
+ prog_load_attr.log_level = 1;
+
+ ret = bpf_sys_bpf(BPF_MAP_CREATE, &map_create_attr, sizeof(map_create_attr));
+ if (ret <= 0)
+ return ret;
+ ctx->map_fd = ret;
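+ /* patch the new map's fd into the BPF_LD_MAP_FD template at insns[3] */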
+ insns[3].imm = ret;
+
+ map_update_attr.map_fd = ret;
+ map_update_attr.key = (long) &key;
+ map_update_attr.value = (long) &value;
+ ret = bpf_sys_bpf(BPF_MAP_UPDATE_ELEM, &map_update_attr, sizeof(map_update_attr));
+ if (ret < 0)
+ return ret;
+
+ ret = bpf_sys_bpf(BPF_PROG_LOAD, &prog_load_attr, sizeof(prog_load_attr));
+ if (ret <= 0)
+ return ret;
+ ctx->prog_fd = ret;
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/tailcall3.c b/tools/testing/selftests/bpf/progs/tailcall3.c
index 739dc2a51e74..910858fe078a 100644
--- a/tools/testing/selftests/bpf/progs/tailcall3.c
+++ b/tools/testing/selftests/bpf/progs/tailcall3.c
@@ -10,7 +10,7 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int count;
+int count = 0;
SEC("classifier/0")
int bpf_func_0(struct __sk_buff *skb)
diff --git a/tools/testing/selftests/bpf/progs/tailcall4.c b/tools/testing/selftests/bpf/progs/tailcall4.c
index f82075b47d7d..bd4be135c39d 100644
--- a/tools/testing/selftests/bpf/progs/tailcall4.c
+++ b/tools/testing/selftests/bpf/progs/tailcall4.c
@@ -10,7 +10,7 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int selector;
+int selector = 0;
#define TAIL_FUNC(x) \
SEC("classifier/" #x) \
diff --git a/tools/testing/selftests/bpf/progs/tailcall5.c b/tools/testing/selftests/bpf/progs/tailcall5.c
index ce5450744fd4..adf30a33064e 100644
--- a/tools/testing/selftests/bpf/progs/tailcall5.c
+++ b/tools/testing/selftests/bpf/progs/tailcall5.c
@@ -10,7 +10,7 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int selector;
+int selector = 0;
#define TAIL_FUNC(x) \
SEC("classifier/" #x) \
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
index 7b1c04183824..3cc4c12817b5 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
@@ -20,7 +20,7 @@ int subprog_tail(struct __sk_buff *skb)
return 1;
}
-static volatile int count;
+int count = 0;
SEC("classifier/0")
int bpf_func_0(struct __sk_buff *skb)
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
index 9a1b166b7fbe..77df6d4db895 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
@@ -9,7 +9,7 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int count;
+int count = 0;
__noinline
int subprog_tail_2(struct __sk_buff *skb)
diff --git a/tools/testing/selftests/bpf/progs/test_check_mtu.c b/tools/testing/selftests/bpf/progs/test_check_mtu.c
index c4a9bae96e75..71184af57749 100644
--- a/tools/testing/selftests/bpf/progs/test_check_mtu.c
+++ b/tools/testing/selftests/bpf/progs/test_check_mtu.c
@@ -11,8 +11,8 @@
char _license[] SEC("license") = "GPL";
/* Userspace will update with MTU it can see on device */
-static volatile const int GLOBAL_USER_MTU;
-static volatile const __u32 GLOBAL_USER_IFINDEX;
+volatile const int GLOBAL_USER_MTU;
+volatile const __u32 GLOBAL_USER_IFINDEX;
/* BPF-prog will update these with MTU values it can see */
__u32 global_bpf_mtu_xdp = 0;
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
index 3c1e042962e6..e2a5acc4785c 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
@@ -39,8 +39,8 @@ char _license[] SEC("license") = "Dual BSD/GPL";
/**
* Destination port and IP used for UDP encapsulation.
*/
-static volatile const __be16 ENCAPSULATION_PORT;
-static volatile const __be32 ENCAPSULATION_IP;
+volatile const __be16 ENCAPSULATION_PORT;
+volatile const __be32 ENCAPSULATION_IP;
typedef struct {
uint64_t processed_packets_total;
diff --git a/tools/testing/selftests/bpf/progs/test_global_func_args.c b/tools/testing/selftests/bpf/progs/test_global_func_args.c
index cae309538a9e..e712bf77daae 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func_args.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func_args.c
@@ -8,7 +8,7 @@ struct S {
int v;
};
-static volatile struct S global_variable;
+struct S global_variable = {};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
diff --git a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
index ecbeea2df259..fc8e8a34a3db 100644
--- a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
+++ b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
@@ -5,7 +5,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-static volatile const struct {
+const struct {
unsigned a[4];
/*
* if the struct's size is multiple of 16, compiler will put it into
@@ -15,11 +15,11 @@ static volatile const struct {
char _y;
} rdonly_values = { .a = {2, 3, 4, 5} };
-static volatile struct {
+struct {
unsigned did_run;
unsigned iters;
unsigned sum;
-} res;
+} res = {};
SEC("raw_tracepoint/sys_enter:skip_loop")
int skip_loop(struct pt_regs *ctx)
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf.c b/tools/testing/selftests/bpf/progs/test_ringbuf.c
index 6b3f288b7c63..eaa7d9dba0be 100644
--- a/tools/testing/selftests/bpf/progs/test_ringbuf.c
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf.c
@@ -35,7 +35,7 @@ long prod_pos = 0;
/* inner state */
long seq = 0;
-SEC("tp/syscalls/sys_enter_getpgid")
+SEC("fentry/__x64_sys_getpgid")
int test_ringbuf(void *ctx)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
@@ -48,7 +48,7 @@ int test_ringbuf(void *ctx)
sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);
if (!sample) {
__sync_fetch_and_add(&dropped, 1);
- return 1;
+ return 0;
}
sample->pid = pid;
diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c
index 374ccef704e1..441fa1c552c8 100644
--- a/tools/testing/selftests/bpf/progs/test_skeleton.c
+++ b/tools/testing/selftests/bpf/progs/test_skeleton.c
@@ -38,11 +38,11 @@ extern int LINUX_KERNEL_VERSION __kconfig;
bool bpf_syscall = 0;
int kern_ver = 0;
+struct s out5 = {};
+
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
- static volatile struct s out5;
-
out1 = in1;
out2 = in2;
out3 = in3;
diff --git a/tools/testing/selftests/bpf/progs/test_snprintf_single.c b/tools/testing/selftests/bpf/progs/test_snprintf_single.c
index 402adaf344f9..3095837334d3 100644
--- a/tools/testing/selftests/bpf/progs/test_snprintf_single.c
+++ b/tools/testing/selftests/bpf/progs/test_snprintf_single.c
@@ -5,7 +5,7 @@
#include <bpf/bpf_helpers.h>
/* The format string is filled from the userspace such that loading fails */
-static const char fmt[10];
+const char fmt[10];
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
index a39eba9f5201..a1cc58b10c7c 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
@@ -28,8 +28,8 @@ struct {
__type(value, unsigned int);
} verdict_map SEC(".maps");
-static volatile bool test_sockmap; /* toggled by user-space */
-static volatile bool test_ingress; /* toggled by user-space */
+bool test_sockmap = false; /* toggled by user-space */
+bool test_ingress = false; /* toggled by user-space */
SEC("sk_skb/stream_parser")
int prog_stream_parser(struct __sk_buff *skb)
diff --git a/tools/testing/selftests/bpf/progs/test_static_linked1.c b/tools/testing/selftests/bpf/progs/test_static_linked1.c
index ea1a6c4c7172..4f0b612e1661 100644
--- a/tools/testing/selftests/bpf/progs/test_static_linked1.c
+++ b/tools/testing/selftests/bpf/progs/test_static_linked1.c
@@ -4,10 +4,10 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-/* 8-byte aligned .bss */
-static volatile long static_var1;
-static volatile int static_var11;
-int var1 = 0;
+/* 8-byte aligned .data */
+static volatile long static_var1 = 2;
+static volatile int static_var2 = 3;
+int var1 = -1;
/* 4-byte aligned .rodata */
const volatile int rovar1;
@@ -21,7 +21,7 @@ static __noinline int subprog(int x)
SEC("raw_tp/sys_enter")
int handler1(const void *ctx)
{
- var1 = subprog(rovar1) + static_var1 + static_var11;
+ var1 = subprog(rovar1) + static_var1 + static_var2;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_static_linked2.c b/tools/testing/selftests/bpf/progs/test_static_linked2.c
index 54d8d1ab577c..766ebd502a60 100644
--- a/tools/testing/selftests/bpf/progs/test_static_linked2.c
+++ b/tools/testing/selftests/bpf/progs/test_static_linked2.c
@@ -4,10 +4,10 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-/* 4-byte aligned .bss */
-static volatile int static_var2;
-static volatile int static_var22;
-int var2 = 0;
+/* 4-byte aligned .data */
+static volatile int static_var1 = 5;
+static volatile int static_var2 = 6;
+int var2 = -1;
/* 8-byte aligned .rodata */
const volatile long rovar2;
@@ -21,7 +21,7 @@ static __noinline int subprog(int x)
SEC("raw_tp/sys_enter")
int handler2(const void *ctx)
{
- var2 = subprog(rovar2) + static_var2 + static_var22;
+ var2 = subprog(rovar2) + static_var1 + static_var2;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_subprogs.c b/tools/testing/selftests/bpf/progs/test_subprogs.c
index d3c5673c0218..b7c37ca09544 100644
--- a/tools/testing/selftests/bpf/progs/test_subprogs.c
+++ b/tools/testing/selftests/bpf/progs/test_subprogs.c
@@ -4,8 +4,18 @@
const char LICENSE[] SEC("license") = "GPL";
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} array SEC(".maps");
+
__noinline int sub1(int x)
{
+ int key = 0;
+
+ bpf_map_lookup_elem(&array, &key);
return x + 1;
}
@@ -23,6 +33,9 @@ static __noinline int sub3(int z)
static __noinline int sub4(int w)
{
+ int key = 0;
+
+ bpf_map_lookup_elem(&array, &key);
return w + sub3(5) + sub1(6);
}
diff --git a/tools/testing/selftests/bpf/progs/test_tc_bpf.c b/tools/testing/selftests/bpf/progs/test_tc_bpf.c
new file mode 100644
index 000000000000..18a3a7ed924a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_bpf.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* Dummy prog to test TC-BPF API */
+
+SEC("classifier")
+int cls(struct __sk_buff *skb)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/trace_printk.c b/tools/testing/selftests/bpf/progs/trace_printk.c
index 8ca7f399b670..119582aa105a 100644
--- a/tools/testing/selftests/bpf/progs/trace_printk.c
+++ b/tools/testing/selftests/bpf/progs/trace_printk.c
@@ -10,11 +10,11 @@ char _license[] SEC("license") = "GPL";
int trace_printk_ret = 0;
int trace_printk_ran = 0;
-SEC("tp/raw_syscalls/sys_enter")
+const char fmt[] = "Testing,testing %d\n";
+
+SEC("fentry/__x64_sys_nanosleep")
int sys_enter(void *ctx)
{
- static const char fmt[] = "testing,testing %d\n";
-
trace_printk_ret = bpf_trace_printk(fmt, sizeof(fmt),
++trace_printk_ran);
return 0;
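This conversion leans on the same pre-load mutability shown in the test hunk above: an lskel's .rodata is plain writable memory between open and load, so the 'T' in the format string can be flipped to 't' before the read-only map is frozen. A sketch of that open/patch/load order (open_patched() is illustrative):

#include "trace_printk.lskel.h"

static struct trace_printk *open_patched(void)
{
	struct trace_printk *skel = trace_printk__open();

	if (!skel)
		return NULL;
	skel->rodata->fmt[0] = 't';	/* "Testing,..." -> "testing,..." */
	if (trace_printk__load(skel)) {	/* .rodata map is frozen here */
		trace_printk__destroy(skel);
		return NULL;
	}
	return skel;
}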
diff --git a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
index 65f43a7ce9c9..1e9a4aff76a2 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
@@ -7,6 +7,8 @@
PORT_NUM_NETIFS=0
+declare -a unsplit
+
port_setup_prepare()
{
:
@@ -20,12 +22,12 @@ port_cleanup()
devlink port unsplit $port
check_err $? "Did not unsplit $netdev"
done
+ unsplit=()
}
split_all_ports()
{
local should_fail=$1; shift
- local -a unsplit
# Loop over the splittable netdevs and create tuples of netdev along
# with its width. For example:
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh
index 27de3d9ed08e..f4493ef9cca1 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh
@@ -29,37 +29,38 @@ cleanup()
get_prio_pg()
{
- __mlnx_qos -i $swp | sed -n '/^PFC/,/^[^[:space:]]/p' |
- grep buffer | sed 's/ \+/ /g' | cut -d' ' -f 2-
+ # Produces a string of numbers "<B0> <B1> ... <B7> ", where BX is number
+ # of buffer that priority X is mapped to.
+ dcb -j buffer show dev $swp |
+ jq -r '[.prio_buffer | .[] | tostring + " "] | add'
}
get_prio_pfc()
{
- __mlnx_qos -i $swp | sed -n '/^PFC/,/^[^[:space:]]/p' |
- grep enabled | sed 's/ \+/ /g' | cut -d' ' -f 2-
+ # Produces a string of numbers "<P0> <P1> ... <P7> ", where PX denotes
+ # whether priority X has PFC enabled (the value is 1) or disabled (0).
+ dcb -j pfc show dev $swp |
+ jq -r '[.prio_pfc | .[] | if . then "1 " else "0 " end] | add'
}
get_prio_tc()
{
- __mlnx_qos -i $swp | sed -n '/^tc/,$p' |
- awk '/^tc/ { TC = $2 }
- /priority:/ { PRIO[$2]=TC }
- END {
- for (i in PRIO)
- printf("%d ", PRIO[i])
- }'
+ # Produces a string of numbers "<T0> <T1> ... <T7> ", where TC is number
+ # of TC that priority X is mapped to.
+ dcb -j ets show dev $swp |
+ jq -r '[.prio_tc | .[] | tostring + " "] | add'
}
get_buf_size()
{
local idx=$1; shift
- __mlnx_qos -i $swp | grep Receive | sed 's/.*: //' | cut -d, -f $((idx + 1))
+ dcb -j buffer show dev $swp | jq ".buffer_size[$idx]"
}
get_tot_size()
{
- __mlnx_qos -i $swp | grep Receive | sed 's/.*total_size=//'
+ dcb -j buffer show dev $swp | jq '.total_size'
}
check_prio_pg()
@@ -121,18 +122,18 @@ test_dcb_ets()
{
RET=0
- __mlnx_qos -i $swp --prio_tc=0,2,4,6,1,3,5,7 > /dev/null
+ dcb ets set dev $swp prio-tc 0:0 1:2 2:4 3:6 4:1 5:3 6:5 7:7
check_prio_pg "0 2 4 6 1 3 5 7 "
check_prio_tc "0 2 4 6 1 3 5 7 "
check_prio_pfc "0 0 0 0 0 0 0 0 "
- __mlnx_qos -i $swp --prio_tc=0,0,0,0,0,0,0,0 > /dev/null
+ dcb ets set dev $swp prio-tc all:0
check_prio_pg "0 0 0 0 0 0 0 0 "
check_prio_tc "0 0 0 0 0 0 0 0 "
- __mlnx_qos -i $swp --prio2buffer=1,3,5,7,0,2,4,6 &> /dev/null
+ dcb buffer set dev $swp prio-buffer 0:1 1:3 2:5 3:7 4:0 5:2 6:4 7:6 2>/dev/null
check_fail $? "prio2buffer accepted in DCB mode"
log_test "Configuring headroom through ETS"
@@ -174,7 +175,7 @@ test_pfc()
{
RET=0
- __mlnx_qos -i $swp --prio_tc=0,0,0,0,0,1,2,3 > /dev/null
+ dcb ets set dev $swp prio-tc all:0 5:1 6:2 7:3
local buf0size=$(get_buf_size 0)
local buf1size=$(get_buf_size 1)
@@ -193,7 +194,7 @@ test_pfc()
RET=0
- __mlnx_qos -i $swp --pfc=0,0,0,0,0,1,1,1 --cable_len=0 > /dev/null
+ dcb pfc set dev $swp prio-pfc all:off 5:on 6:on 7:on delay 0
check_prio_pg "0 0 0 0 0 1 2 3 "
check_prio_pfc "0 0 0 0 0 1 1 1 "
@@ -210,7 +211,7 @@ test_pfc()
RET=0
- __mlnx_qos -i $swp --pfc=0,0,0,0,0,1,1,1 --cable_len=1000 > /dev/null
+ dcb pfc set dev $swp delay 1000
check_buf_size 0 "== $buf0size"
check_buf_size 1 "> $buf1size"
@@ -221,8 +222,8 @@ test_pfc()
RET=0
- __mlnx_qos -i $swp --pfc=0,0,0,0,0,0,0,0 --cable_len=0 > /dev/null
- __mlnx_qos -i $swp --prio_tc=0,0,0,0,0,0,0,0 > /dev/null
+ dcb pfc set dev $swp prio-pfc all:off delay 0
+ dcb ets set dev $swp prio-tc all:0
check_prio_pg "0 0 0 0 0 0 0 0 "
check_prio_tc "0 0 0 0 0 0 0 0 "
@@ -242,13 +243,13 @@ test_tc_priomap()
{
RET=0
- __mlnx_qos -i $swp --prio_tc=0,1,2,3,4,5,6,7 > /dev/null
+ dcb ets set dev $swp prio-tc 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
check_prio_pg "0 1 2 3 4 5 6 7 "
tc qdisc replace dev $swp root handle 1: bfifo limit 1.5M
check_prio_pg "0 0 0 0 0 0 0 0 "
- __mlnx_qos -i $swp --prio2buffer=1,3,5,7,0,2,4,6 > /dev/null
+ dcb buffer set dev $swp prio-buffer 0:1 1:3 2:5 3:7 4:0 5:2 6:4 7:6
check_prio_pg "1 3 5 7 0 2 4 6 "
tc qdisc delete dev $swp root
@@ -256,9 +257,9 @@ test_tc_priomap()
# Clean up.
tc qdisc replace dev $swp root handle 1: bfifo limit 1.5M
- __mlnx_qos -i $swp --prio2buffer=0,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp prio-buffer all:0
tc qdisc delete dev $swp root
- __mlnx_qos -i $swp --prio_tc=0,0,0,0,0,0,0,0 > /dev/null
+ dcb ets set dev $swp prio-tc all:0
log_test "TC: priomap"
}
@@ -270,12 +271,12 @@ test_tc_sizes()
RET=0
- __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 &> /dev/null
+ dcb buffer set dev $swp buffer-size all:0 0:$size 2>/dev/null
check_fail $? "buffer_size should fail before qdisc is added"
tc qdisc replace dev $swp root handle 1: bfifo limit 1.5M
- __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp buffer-size all:0 0:$size
check_err $? "buffer_size should pass after qdisc is added"
check_buf_size 0 "== $size" "set size: "
@@ -283,26 +284,26 @@ test_tc_sizes()
check_buf_size 0 "== $size" "set MTU: "
mtu_restore $swp
- __mlnx_qos -i $swp --buffer_size=0,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp buffer-size all:0
# After replacing the qdisc for the same kind, buffer_size still has to
# work.
tc qdisc replace dev $swp root handle 1: bfifo limit 1M
- __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp buffer-size all:0 0:$size
check_buf_size 0 "== $size" "post replace, set size: "
- __mlnx_qos -i $swp --buffer_size=0,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp buffer-size all:0
# Likewise after replacing for a different kind.
tc qdisc replace dev $swp root handle 2: prio bands 8
- __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp buffer-size all:0 0:$size
check_buf_size 0 "== $size" "post replace different kind, set size: "
tc qdisc delete dev $swp root
- __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 &> /dev/null
+ dcb buffer set dev $swp buffer-size all:0 0:$size 2>/dev/null
check_fail $? "buffer_size should fail after qdisc is deleted"
log_test "TC: buffer size"
@@ -363,10 +364,10 @@ test_tc_int_buf()
tc qdisc replace dev $swp root handle 1: bfifo limit 1.5M
test_int_buf "TC: "
- __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp buffer-size all:0 0:$size
test_int_buf "TC+buffsize: "
- __mlnx_qos -i $swp --buffer_size=0,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp buffer-size all:0
tc qdisc delete dev $swp root
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh
index 0bf76f13c030..faa51012cdac 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh
@@ -82,17 +82,3 @@ bail_on_lldpad()
fi
fi
}
-
-__mlnx_qos()
-{
- local err
-
- mlnx_qos "$@" 2>/dev/null
- err=$?
-
- if ((err)); then
- echo "Error ($err) in mlnx_qos $@" >/dev/stderr
- fi
-
- return $err
-}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
index 5c7700212f75..5d5622fc2758 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
@@ -171,7 +171,7 @@ switch_create()
# assignment.
tc qdisc replace dev $swp1 root handle 1: \
ets bands 8 strict 8 priomap 7 6
- __mlnx_qos -i $swp1 --prio2buffer=0,1,0,0,0,0,0,0 >/dev/null
+ dcb buffer set dev $swp1 prio-buffer all:0 1:1
# $swp2
# -----
@@ -209,8 +209,8 @@ switch_create()
# the lossless prio into a buffer of its own. Don't bother with buffer
# sizes though, there is not going to be any pressure in the "backward"
# direction.
- __mlnx_qos -i $swp3 --prio2buffer=0,1,0,0,0,0,0,0 >/dev/null
- __mlnx_qos -i $swp3 --pfc=0,1,0,0,0,0,0,0 >/dev/null
+ dcb buffer set dev $swp3 prio-buffer all:0 1:1
+ dcb pfc set dev $swp3 prio-pfc all:off 1:on
# $swp4
# -----
@@ -226,11 +226,11 @@ switch_create()
# Configure qdisc so that we can hand-tune headroom.
tc qdisc replace dev $swp4 root handle 1: \
ets bands 8 strict 8 priomap 7 6
- __mlnx_qos -i $swp4 --prio2buffer=0,1,0,0,0,0,0,0 >/dev/null
- __mlnx_qos -i $swp4 --pfc=0,1,0,0,0,0,0,0 >/dev/null
+ dcb buffer set dev $swp4 prio-buffer all:0 1:1
+ dcb pfc set dev $swp4 prio-pfc all:off 1:on
+ # PG0 will get autoconfigured to Xoff. Give PG1 an arbitrary 100K, which
+ # (minus 2*MTU) leaves about 80K of delay provision.
- __mlnx_qos -i $swp4 --buffer_size=0,$_100KB,0,0,0,0,0,0 >/dev/null
+ dcb buffer set dev $swp4 buffer-size all:0 1:$_100KB
# bridges
# -------
@@ -273,9 +273,9 @@ switch_destroy()
# $swp4
# -----
- __mlnx_qos -i $swp4 --buffer_size=0,0,0,0,0,0,0,0 >/dev/null
- __mlnx_qos -i $swp4 --pfc=0,0,0,0,0,0,0,0 >/dev/null
- __mlnx_qos -i $swp4 --prio2buffer=0,0,0,0,0,0,0,0 >/dev/null
+ dcb buffer set dev $swp4 buffer-size all:0
+ dcb pfc set dev $swp4 prio-pfc all:off
+ dcb buffer set dev $swp4 prio-buffer all:0
tc qdisc del dev $swp4 root
devlink_tc_bind_pool_th_restore $swp4 1 ingress
@@ -288,8 +288,8 @@ switch_destroy()
# $swp3
# -----
- __mlnx_qos -i $swp3 --pfc=0,0,0,0,0,0,0,0 >/dev/null
- __mlnx_qos -i $swp3 --prio2buffer=0,0,0,0,0,0,0,0 >/dev/null
+ dcb pfc set dev $swp3 prio-pfc all:off
+ dcb buffer set dev $swp3 prio-buffer all:0
tc qdisc del dev $swp3 root
devlink_tc_bind_pool_th_restore $swp3 1 egress
@@ -315,7 +315,7 @@ switch_destroy()
# $swp1
# -----
- __mlnx_qos -i $swp1 --prio2buffer=0,0,0,0,0,0,0,0 >/dev/null
+ dcb buffer set dev $swp1 prio-buffer all:0
tc qdisc del dev $swp1 root
devlink_tc_bind_pool_th_restore $swp1 1 ingress
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh
index 093bed088ad0..373d5f2a846e 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh
@@ -234,15 +234,15 @@ __tc_sample_rate_test()
psample_capture_start
- ip vrf exec v$h1 $MZ $h1 -c 3200 -d 1msec -p 64 -A 192.0.2.1 \
+ ip vrf exec v$h1 $MZ $h1 -c 320000 -d 100usec -p 64 -A 192.0.2.1 \
-B $dip -t udp dp=52768,sp=42768 -q
psample_capture_stop
pkts=$(grep -e "group 1 " $CAPTURE_FILE | wc -l)
- pct=$((100 * (pkts - 100) / 100))
+ pct=$((100 * (pkts - 10000) / 10000))
(( -25 <= pct && pct <= 25))
- check_err $? "Expected 100 packets, got $pkts packets, which is $pct% off. Required accuracy is +-25%"
+ check_err $? "Expected 10000 packets, got $pkts packets, which is $pct% off. Required accuracy is +-25%"
log_test "tc sample rate ($desc)"
@@ -587,15 +587,15 @@ __tc_sample_acl_rate_test()
psample_capture_start
- ip vrf exec v$h1 $MZ $h1 -c 3200 -d 1msec -p 64 -A 192.0.2.1 \
+ ip vrf exec v$h1 $MZ $h1 -c 320000 -d 100usec -p 64 -A 192.0.2.1 \
-B 198.51.100.1 -t udp dp=52768,sp=42768 -q
psample_capture_stop
pkts=$(grep -e "group 1 " $CAPTURE_FILE | wc -l)
- pct=$((100 * (pkts - 100) / 100))
+ pct=$((100 * (pkts - 10000) / 10000))
(( -25 <= pct && pct <= 25))
- check_err $? "Expected 100 packets, got $pkts packets, which is $pct% off. Required accuracy is +-25%"
+ check_err $? "Expected 10000 packets, got $pkts packets, which is $pct% off. Required accuracy is +-25%"
# Setup a filter that should not match any packet and make sure packets
# are not sampled.
diff --git a/tools/testing/selftests/net/devlink_port_split.py b/tools/testing/selftests/net/devlink_port_split.py
index 834066d465fc..2b5d6ff87373 100755
--- a/tools/testing/selftests/net/devlink_port_split.py
+++ b/tools/testing/selftests/net/devlink_port_split.py
@@ -18,6 +18,8 @@ import sys
#
+# Kselftest framework requirement - SKIP code is 4
+KSFT_SKIP=4
Port = collections.namedtuple('Port', 'bus_info name')
@@ -239,7 +241,11 @@ def main(cmdline=None):
assert stderr == ""
devs = json.loads(stdout)['dev']
- dev = list(devs.keys())[0]
+ if devs:
+ dev = list(devs.keys())[0]
+ else:
+ print("no devlink device was found, test skipped")
+ sys.exit(KSFT_SKIP)
cmd = "devlink dev show %s" % dev
stdout, stderr = run_command(cmd)
diff --git a/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
new file mode 100755
index 000000000000..a15d21dc035a
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
@@ -0,0 +1,364 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test traffic distribution between two paths when using custom hash policy.
+#
+# +--------------------------------+
+# | H1 |
+# | $h1 + |
+# | 198.51.100.{2-253}/24 | |
+# | 2001:db8:1::{2-fd}/64 | |
+# +-------------------------|------+
+# |
+# +-------------------------|-------------------------+
+# | SW1 | |
+# | $rp1 + |
+# | 198.51.100.1/24 |
+# | 2001:db8:1::1/64 |
+# | |
+# | |
+# | $rp11 + + $rp12 |
+# | 192.0.2.1/28 | | 192.0.2.17/28 |
+# | 2001:db8:2::1/64 | | 2001:db8:3::1/64 |
+# +------------------|-------------|------------------+
+# | |
+# +------------------|-------------|------------------+
+# | SW2 | | |
+# | | | |
+# | $rp21 + + $rp22 |
+# | 192.0.2.2/28 192.0.2.18/28 |
+# | 2001:db8:2::2/64 2001:db8:3::2/64 |
+# | |
+# | |
+# | $rp2 + |
+# | 203.0.113.1/24 | |
+# | 2001:db8:4::1/64 | |
+# +-------------------------|-------------------------+
+# |
+# +-------------------------|------+
+# | H2 | |
+# | $h2 + |
+# | 203.0.113.{2-253}/24 |
+# | 2001:db8:4::{2-fd}/64 |
+# +--------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ custom_hash
+"
+
+NUM_NETIFS=8
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 198.51.100.2/24 2001:db8:1::2/64
+ ip route add vrf v$h1 default via 198.51.100.1 dev $h1
+ ip -6 route add vrf v$h1 default via 2001:db8:1::1 dev $h1
+}
+
+h1_destroy()
+{
+ ip -6 route del vrf v$h1 default
+ ip route del vrf v$h1 default
+ simple_if_fini $h1 198.51.100.2/24 2001:db8:1::2/64
+}
+
+sw1_create()
+{
+ simple_if_init $rp1 198.51.100.1/24 2001:db8:1::1/64
+ __simple_if_init $rp11 v$rp1 192.0.2.1/28 2001:db8:2::1/64
+ __simple_if_init $rp12 v$rp1 192.0.2.17/28 2001:db8:3::1/64
+
+ ip route add vrf v$rp1 203.0.113.0/24 \
+ nexthop via 192.0.2.2 dev $rp11 \
+ nexthop via 192.0.2.18 dev $rp12
+
+ ip -6 route add vrf v$rp1 2001:db8:4::/64 \
+ nexthop via 2001:db8:2::2 dev $rp11 \
+ nexthop via 2001:db8:3::2 dev $rp12
+}
+
+sw1_destroy()
+{
+ ip -6 route del vrf v$rp1 2001:db8:4::/64
+
+ ip route del vrf v$rp1 203.0.113.0/24
+
+ __simple_if_fini $rp12 192.0.2.17/28 2001:db8:3::1/64
+ __simple_if_fini $rp11 192.0.2.1/28 2001:db8:2::1/64
+ simple_if_fini $rp1 198.51.100.1/24 2001:db8:1::1/64
+}
+
+sw2_create()
+{
+ simple_if_init $rp2 203.0.113.1/24 2001:db8:4::1/64
+ __simple_if_init $rp21 v$rp2 192.0.2.2/28 2001:db8:2::2/64
+ __simple_if_init $rp22 v$rp2 192.0.2.18/28 2001:db8:3::2/64
+
+ ip route add vrf v$rp2 198.51.100.0/24 \
+ nexthop via 192.0.2.1 dev $rp21 \
+ nexthop via 192.0.2.17 dev $rp22
+
+ ip -6 route add vrf v$rp2 2001:db8:1::/64 \
+ nexthop via 2001:db8:2::1 dev $rp21 \
+ nexthop via 2001:db8:3::1 dev $rp22
+}
+
+sw2_destroy()
+{
+ ip -6 route del vrf v$rp2 2001:db8:1::/64
+
+ ip route del vrf v$rp2 198.51.100.0/24
+
+ __simple_if_fini $rp22 192.0.2.18/28 2001:db8:3::2/64
+ __simple_if_fini $rp21 192.0.2.2/28 2001:db8:2::2/64
+ simple_if_fini $rp2 203.0.113.1/24 2001:db8:4::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 203.0.113.2/24 2001:db8:4::2/64
+ ip route add vrf v$h2 default via 203.0.113.1 dev $h2
+ ip -6 route add vrf v$h2 default via 2001:db8:4::1 dev $h2
+}
+
+h2_destroy()
+{
+ ip -6 route del vrf v$h2 default
+ ip route del vrf v$h2 default
+ simple_if_fini $h2 203.0.113.2/24 2001:db8:4::2/64
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+
+ rp1=${NETIFS[p2]}
+
+ rp11=${NETIFS[p3]}
+ rp21=${NETIFS[p4]}
+
+ rp12=${NETIFS[p5]}
+ rp22=${NETIFS[p6]}
+
+ rp2=${NETIFS[p7]}
+
+ h2=${NETIFS[p8]}
+
+ vrf_prepare
+ h1_create
+ sw1_create
+ sw2_create
+ h2_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ h2_destroy
+ sw2_destroy
+ sw1_destroy
+ h1_destroy
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 203.0.113.2
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:4::2
+}
+
+send_src_ipv4()
+{
+ $MZ $h1 -q -p 64 -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_dst_ipv4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_src_udp4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ -d 1msec -t udp "sp=0-32768,dp=30000"
+}
+
+send_dst_udp4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ -d 1msec -t udp "sp=20000,dp=0-32768"
+}
+
+send_src_ipv6()
+{
+ $MZ -6 $h1 -q -p 64 -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:4::2 \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_dst_ipv6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B "2001:db8:4::2-2001:db8:4::fd" \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_flowlabel()
+{
+ # Generate 16384 echo requests, each with a random flow label.
+ for _ in $(seq 1 16384); do
+ ip vrf exec v$h1 \
+ $PING6 2001:db8:4::2 -F 0 -c 1 -q >/dev/null 2>&1
+ done
+}
+
+send_src_udp6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:4::2 \
+ -d 1msec -t udp "sp=0-32768,dp=30000"
+}
+
+send_dst_udp6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:4::2 \
+ -d 1msec -t udp "sp=20000,dp=0-32768"
+}
+
+custom_hash_test()
+{
+ local field="$1"; shift
+ local balanced="$1"; shift
+ local send_flows="$@"
+
+ RET=0
+
+ local t0_rp11=$(link_stats_tx_packets_get $rp11)
+ local t0_rp12=$(link_stats_tx_packets_get $rp12)
+
+ $send_flows
+
+ local t1_rp11=$(link_stats_tx_packets_get $rp11)
+ local t1_rp12=$(link_stats_tx_packets_get $rp12)
+
+ local d_rp11=$((t1_rp11 - t0_rp11))
+ local d_rp12=$((t1_rp12 - t0_rp12))
+
+ local diff=$((d_rp12 - d_rp11))
+ local sum=$((d_rp11 + d_rp12))
+
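+	# Consider the distribution balanced if the two paths differ by at
+	# most 20% of the total number of packets sent.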
+ local pct=$(echo "$diff / $sum * 100" | bc -l)
+ local is_balanced=$(echo "-20 <= $pct && $pct <= 20" | bc)
+
+ [[ ( $is_balanced -eq 1 && $balanced == "balanced" ) ||
+ ( $is_balanced -eq 0 && $balanced == "unbalanced" ) ]]
+ check_err $? "Expected traffic to be $balanced, but it is not"
+
+ log_test "Multipath hash field: $field ($balanced)"
+ log_info "Packets sent on path1 / path2: $d_rp11 / $d_rp12"
+}
+
+custom_hash_v4()
+{
+ log_info "Running IPv4 custom multipath hash tests"
+
+ sysctl_set net.ipv4.fib_multipath_hash_policy 3
+
+	# Prevent the neighbour table from overflowing, as different neighbour
+	# entries will be created on $rp2 when using different destination IPs.
+ sysctl_set net.ipv4.neigh.default.gc_thresh1 1024
+ sysctl_set net.ipv4.neigh.default.gc_thresh2 1024
+ sysctl_set net.ipv4.neigh.default.gc_thresh3 1024
+
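+	# Hash field bits below are taken from
+	# Documentation/networking/ip-sysctl.rst: 0x0001 - source IP,
+	# 0x0002 - destination IP, 0x0010 - source port,
+	# 0x0020 - destination port.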
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0001
+ custom_hash_test "Source IP" "balanced" send_src_ipv4
+ custom_hash_test "Source IP" "unbalanced" send_dst_ipv4
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0002
+ custom_hash_test "Destination IP" "balanced" send_dst_ipv4
+ custom_hash_test "Destination IP" "unbalanced" send_src_ipv4
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0010
+ custom_hash_test "Source port" "balanced" send_src_udp4
+ custom_hash_test "Source port" "unbalanced" send_dst_udp4
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0020
+ custom_hash_test "Destination port" "balanced" send_dst_udp4
+ custom_hash_test "Destination port" "unbalanced" send_src_udp4
+
+ sysctl_restore net.ipv4.neigh.default.gc_thresh3
+ sysctl_restore net.ipv4.neigh.default.gc_thresh2
+ sysctl_restore net.ipv4.neigh.default.gc_thresh1
+
+ sysctl_restore net.ipv4.fib_multipath_hash_policy
+}
+
+custom_hash_v6()
+{
+ log_info "Running IPv6 custom multipath hash tests"
+
+ sysctl_set net.ipv6.fib_multipath_hash_policy 3
+
+	# Prevent the neighbour table from overflowing, as different neighbour
+	# entries will be created on $rp2 when using different destination IPs.
+ sysctl_set net.ipv6.neigh.default.gc_thresh1 1024
+ sysctl_set net.ipv6.neigh.default.gc_thresh2 1024
+ sysctl_set net.ipv6.neigh.default.gc_thresh3 1024
+
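+	# Hash field bits per Documentation/networking/ip-sysctl.rst:
+	# 0x0001 - source IP, 0x0002 - destination IP, 0x0008 - flow label,
+	# 0x0010 - source port, 0x0020 - destination port.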
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0001
+ custom_hash_test "Source IP" "balanced" send_src_ipv6
+ custom_hash_test "Source IP" "unbalanced" send_dst_ipv6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0002
+ custom_hash_test "Destination IP" "balanced" send_dst_ipv6
+ custom_hash_test "Destination IP" "unbalanced" send_src_ipv6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0008
+ custom_hash_test "Flowlabel" "balanced" send_flowlabel
+ custom_hash_test "Flowlabel" "unbalanced" send_src_ipv6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0010
+ custom_hash_test "Source port" "balanced" send_src_udp6
+ custom_hash_test "Source port" "unbalanced" send_dst_udp6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0020
+ custom_hash_test "Destination port" "balanced" send_dst_udp6
+ custom_hash_test "Destination port" "unbalanced" send_src_udp6
+
+ sysctl_restore net.ipv6.neigh.default.gc_thresh3
+ sysctl_restore net.ipv6.neigh.default.gc_thresh2
+ sysctl_restore net.ipv6.neigh.default.gc_thresh1
+
+ sysctl_restore net.ipv6.fib_multipath_hash_policy
+}
+
+custom_hash()
+{
+ # Test that when the hash policy is set to custom, traffic is
+ # distributed only according to the fields set in the
+ # fib_multipath_hash_fields sysctl.
+ #
+	# Each time, set a different field and make sure traffic is
+	# distributed only when that field is changed in the packet stream.
+ custom_hash_v4
+ custom_hash_v6
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
new file mode 100755
index 000000000000..a73f52efcb6c
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
@@ -0,0 +1,456 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test traffic distribution when there are multiple routes between the
+# endpoints of an IPv4 GRE tunnel. The tunnel carries IPv4 and IPv6 traffic
+# between multiple hosts, and the underlay network has multiple routes
+# between the tunnel endpoints. With the default multipath policy, SW2 only
+# looks at the outer IP addresses, so only a single route would be used.
+#
+# +--------------------------------+
+# | H1 |
+# | $h1 + |
+# | 198.51.100.{2-253}/24 | |
+# | 2001:db8:1::{2-fd}/64 | |
+# +-------------------------|------+
+# |
+# +-------------------------|------------------+
+# | SW1 | |
+# | $ol1 + |
+# | 198.51.100.1/24 |
+# | 2001:db8:1::1/64 |
+# | |
+# | + g1 (gre) |
+# | loc=192.0.2.1 |
+# | rem=192.0.2.2 --. |
+# | tos=inherit | |
+# | v |
+# | + $ul1 |
+# | | 192.0.2.17/28 |
+# +---------------------|----------------------+
+# |
+# +---------------------|----------------------+
+# | SW2 | |
+# | $ul21 + |
+# | 192.0.2.18/28 | |
+# | | |
+# | __________________+___ |
+# | / \ |
+# | | | |
+# | + $ul22.111 (vlan) + $ul22.222 (vlan) |
+# | | 192.0.2.33/28 | 192.0.2.49/28 |
+# | | | |
+# +--|----------------------|------------------+
+# | |
+# +--|----------------------|------------------+
+# | | | |
+# | + $ul32.111 (vlan) + $ul32.222 (vlan) |
+# | | 192.0.2.34/28 | 192.0.2.50/28 |
+# | | | |
+# | \__________________+___/ |
+# | | |
+# | | |
+# | $ul31 + |
+# | 192.0.2.65/28 | SW3 |
+# +---------------------|----------------------+
+# |
+# +---------------------|----------------------+
+# | + $ul4 |
+# | ^ 192.0.2.66/28 |
+# | | |
+# | + g2 (gre) | |
+# | loc=192.0.2.2 | |
+# | rem=192.0.2.1 --' |
+# | tos=inherit |
+# | |
+# | $ol4 + |
+# | 203.0.113.1/24 | |
+# | 2001:db8:2::1/64 | SW4 |
+# +-------------------------|------------------+
+# |
+# +-------------------------|------+
+# | | |
+# | $h2 + |
+# | 203.0.113.{2-253}/24 |
+# | 2001:db8:2::{2-fd}/64 H2 |
+# +--------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ custom_hash
+"
+
+NUM_NETIFS=10
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 198.51.100.2/24 2001:db8:1::2/64
+ ip route add vrf v$h1 default via 198.51.100.1 dev $h1
+ ip -6 route add vrf v$h1 default via 2001:db8:1::1 dev $h1
+}
+
+h1_destroy()
+{
+ ip -6 route del vrf v$h1 default
+ ip route del vrf v$h1 default
+ simple_if_fini $h1 198.51.100.2/24 2001:db8:1::2/64
+}
+
+sw1_create()
+{
+ simple_if_init $ol1 198.51.100.1/24 2001:db8:1::1/64
+ __simple_if_init $ul1 v$ol1 192.0.2.17/28
+
+ tunnel_create g1 gre 192.0.2.1 192.0.2.2 tos inherit dev v$ol1
+ __simple_if_init g1 v$ol1 192.0.2.1/32
+ ip route add vrf v$ol1 192.0.2.2/32 via 192.0.2.18
+
+ ip route add vrf v$ol1 203.0.113.0/24 dev g1
+ ip -6 route add vrf v$ol1 2001:db8:2::/64 dev g1
+}
+
+sw1_destroy()
+{
+ ip -6 route del vrf v$ol1 2001:db8:2::/64
+ ip route del vrf v$ol1 203.0.113.0/24
+
+ ip route del vrf v$ol1 192.0.2.2/32
+ __simple_if_fini g1 192.0.2.1/32
+ tunnel_destroy g1
+
+ __simple_if_fini $ul1 192.0.2.17/28
+ simple_if_fini $ol1 198.51.100.1/24 2001:db8:1::1/64
+}
+
+sw2_create()
+{
+ simple_if_init $ul21 192.0.2.18/28
+ __simple_if_init $ul22 v$ul21
+ vlan_create $ul22 111 v$ul21 192.0.2.33/28
+ vlan_create $ul22 222 v$ul21 192.0.2.49/28
+
+ ip route add vrf v$ul21 192.0.2.1/32 via 192.0.2.17
+ ip route add vrf v$ul21 192.0.2.2/32 \
+ nexthop via 192.0.2.34 \
+ nexthop via 192.0.2.50
+}
+
+sw2_destroy()
+{
+ ip route del vrf v$ul21 192.0.2.2/32
+ ip route del vrf v$ul21 192.0.2.1/32
+
+ vlan_destroy $ul22 222
+ vlan_destroy $ul22 111
+ __simple_if_fini $ul22
+ simple_if_fini $ul21 192.0.2.18/28
+}
+
+sw3_create()
+{
+ simple_if_init $ul31 192.0.2.65/28
+ __simple_if_init $ul32 v$ul31
+ vlan_create $ul32 111 v$ul31 192.0.2.34/28
+ vlan_create $ul32 222 v$ul31 192.0.2.50/28
+
+ ip route add vrf v$ul31 192.0.2.2/32 via 192.0.2.66
+ ip route add vrf v$ul31 192.0.2.1/32 \
+ nexthop via 192.0.2.33 \
+ nexthop via 192.0.2.49
+
+ tc qdisc add dev $ul32 clsact
+ tc filter add dev $ul32 ingress pref 111 prot 802.1Q \
+ flower vlan_id 111 action pass
+ tc filter add dev $ul32 ingress pref 222 prot 802.1Q \
+ flower vlan_id 222 action pass
+}
+
+sw3_destroy()
+{
+ tc qdisc del dev $ul32 clsact
+
+ ip route del vrf v$ul31 192.0.2.1/32
+ ip route del vrf v$ul31 192.0.2.2/32
+
+ vlan_destroy $ul32 222
+ vlan_destroy $ul32 111
+ __simple_if_fini $ul32
+ simple_if_fini $ul31 192.0.2.65/28
+}
+
+sw4_create()
+{
+ simple_if_init $ol4 203.0.113.1/24 2001:db8:2::1/64
+ __simple_if_init $ul4 v$ol4 192.0.2.66/28
+
+ tunnel_create g2 gre 192.0.2.2 192.0.2.1 tos inherit dev v$ol4
+ __simple_if_init g2 v$ol4 192.0.2.2/32
+ ip route add vrf v$ol4 192.0.2.1/32 via 192.0.2.65
+
+ ip route add vrf v$ol4 198.51.100.0/24 dev g2
+ ip -6 route add vrf v$ol4 2001:db8:1::/64 dev g2
+}
+
+sw4_destroy()
+{
+ ip -6 route del vrf v$ol4 2001:db8:1::/64
+ ip route del vrf v$ol4 198.51.100.0/24
+
+ ip route del vrf v$ol4 192.0.2.1/32
+ __simple_if_fini g2 192.0.2.2/32
+ tunnel_destroy g2
+
+ __simple_if_fini $ul4 192.0.2.66/28
+ simple_if_fini $ol4 203.0.113.1/24 2001:db8:2::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 203.0.113.2/24 2001:db8:2::2/64
+ ip route add vrf v$h2 default via 203.0.113.1 dev $h2
+ ip -6 route add vrf v$h2 default via 2001:db8:2::1 dev $h2
+}
+
+h2_destroy()
+{
+ ip -6 route del vrf v$h2 default
+ ip route del vrf v$h2 default
+ simple_if_fini $h2 203.0.113.2/24 2001:db8:2::2/64
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+
+ ol1=${NETIFS[p2]}
+ ul1=${NETIFS[p3]}
+
+ ul21=${NETIFS[p4]}
+ ul22=${NETIFS[p5]}
+
+ ul32=${NETIFS[p6]}
+ ul31=${NETIFS[p7]}
+
+ ul4=${NETIFS[p8]}
+ ol4=${NETIFS[p9]}
+
+ h2=${NETIFS[p10]}
+
+ vrf_prepare
+ h1_create
+ sw1_create
+ sw2_create
+ sw3_create
+ sw4_create
+ h2_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ h2_destroy
+ sw4_destroy
+ sw3_destroy
+ sw2_destroy
+ sw1_destroy
+ h1_destroy
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 203.0.113.2
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:2::2
+}
+
+send_src_ipv4()
+{
+ $MZ $h1 -q -p 64 -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_dst_ipv4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_src_udp4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ -d 1msec -t udp "sp=0-32768,dp=30000"
+}
+
+send_dst_udp4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ -d 1msec -t udp "sp=20000,dp=0-32768"
+}
+
+send_src_ipv6()
+{
+ $MZ -6 $h1 -q -p 64 -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_dst_ipv6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_flowlabel()
+{
+ # Generate 16384 echo requests, each with a random flow label.
+ for _ in $(seq 1 16384); do
+ ip vrf exec v$h1 \
+ $PING6 2001:db8:2::2 -F 0 -c 1 -q >/dev/null 2>&1
+ done
+}
+
+send_src_udp6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ -d 1msec -t udp "sp=0-32768,dp=30000"
+}
+
+send_dst_udp6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ -d 1msec -t udp "sp=20000,dp=0-32768"
+}
+
+custom_hash_test()
+{
+ local field="$1"; shift
+ local balanced="$1"; shift
+ local send_flows="$@"
+
+ RET=0
+
+ local t0_111=$(tc_rule_stats_get $ul32 111 ingress)
+ local t0_222=$(tc_rule_stats_get $ul32 222 ingress)
+
+ $send_flows
+
+ local t1_111=$(tc_rule_stats_get $ul32 111 ingress)
+ local t1_222=$(tc_rule_stats_get $ul32 222 ingress)
+
+ local d111=$((t1_111 - t0_111))
+ local d222=$((t1_222 - t0_222))
+
+ local diff=$((d222 - d111))
+ local sum=$((d111 + d222))
+
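+	# Consider the distribution balanced if the two paths differ by at
+	# most 20% of the total number of packets sent.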
+ local pct=$(echo "$diff / $sum * 100" | bc -l)
+ local is_balanced=$(echo "-20 <= $pct && $pct <= 20" | bc)
+
+ [[ ( $is_balanced -eq 1 && $balanced == "balanced" ) ||
+ ( $is_balanced -eq 0 && $balanced == "unbalanced" ) ]]
+ check_err $? "Expected traffic to be $balanced, but it is not"
+
+ log_test "Multipath hash field: $field ($balanced)"
+ log_info "Packets sent on path1 / path2: $d111 / $d222"
+}
+
+custom_hash_v4()
+{
+ log_info "Running IPv4 overlay custom multipath hash tests"
+
+ # Prevent the neighbour table from overflowing, as different neighbour
+ # entries will be created on $ol4 when using different destination IPs.
+ sysctl_set net.ipv4.neigh.default.gc_thresh1 1024
+ sysctl_set net.ipv4.neigh.default.gc_thresh2 1024
+ sysctl_set net.ipv4.neigh.default.gc_thresh3 1024
+
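+	# Inner hash field bits per Documentation/networking/ip-sysctl.rst:
+	# 0x0040 - inner source IP, 0x0080 - inner destination IP,
+	# 0x0400 - inner source port, 0x0800 - inner destination port.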
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0040
+ custom_hash_test "Inner source IP" "balanced" send_src_ipv4
+ custom_hash_test "Inner source IP" "unbalanced" send_dst_ipv4
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0080
+ custom_hash_test "Inner destination IP" "balanced" send_dst_ipv4
+ custom_hash_test "Inner destination IP" "unbalanced" send_src_ipv4
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0400
+ custom_hash_test "Inner source port" "balanced" send_src_udp4
+ custom_hash_test "Inner source port" "unbalanced" send_dst_udp4
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0800
+ custom_hash_test "Inner destination port" "balanced" send_dst_udp4
+ custom_hash_test "Inner destination port" "unbalanced" send_src_udp4
+
+ sysctl_restore net.ipv4.neigh.default.gc_thresh3
+ sysctl_restore net.ipv4.neigh.default.gc_thresh2
+ sysctl_restore net.ipv4.neigh.default.gc_thresh1
+}
+
+custom_hash_v6()
+{
+ log_info "Running IPv6 overlay custom multipath hash tests"
+
+ # Prevent the neighbour table from overflowing, as different neighbour
+ # entries will be created on $ol4 when using different destination IPs.
+ sysctl_set net.ipv6.neigh.default.gc_thresh1 1024
+ sysctl_set net.ipv6.neigh.default.gc_thresh2 1024
+ sysctl_set net.ipv6.neigh.default.gc_thresh3 1024
+
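+	# The IPv4 sysctl is used on purpose: the multipath routes being
+	# exercised are the IPv4 routes of the underlay network. Per
+	# Documentation/networking/ip-sysctl.rst, 0x0200 selects the inner
+	# flow label.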
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0040
+ custom_hash_test "Inner source IP" "balanced" send_src_ipv6
+ custom_hash_test "Inner source IP" "unbalanced" send_dst_ipv6
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0080
+ custom_hash_test "Inner destination IP" "balanced" send_dst_ipv6
+ custom_hash_test "Inner destination IP" "unbalanced" send_src_ipv6
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0200
+ custom_hash_test "Inner flowlabel" "balanced" send_flowlabel
+ custom_hash_test "Inner flowlabel" "unbalanced" send_src_ipv6
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0400
+ custom_hash_test "Inner source port" "balanced" send_src_udp6
+ custom_hash_test "Inner source port" "unbalanced" send_dst_udp6
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0800
+ custom_hash_test "Inner destination port" "balanced" send_dst_udp6
+ custom_hash_test "Inner destination port" "unbalanced" send_src_udp6
+
+ sysctl_restore net.ipv6.neigh.default.gc_thresh3
+ sysctl_restore net.ipv6.neigh.default.gc_thresh2
+ sysctl_restore net.ipv6.neigh.default.gc_thresh1
+}
+
+custom_hash()
+{
+ # Test that when the hash policy is set to custom, traffic is
+ # distributed only according to the fields set in the
+ # fib_multipath_hash_fields sysctl.
+ #
+	# Each time, set a different field and make sure traffic is
+	# distributed only when that field is changed in the packet stream.
+
+ sysctl_set net.ipv4.fib_multipath_hash_policy 3
+
+ custom_hash_v4
+ custom_hash_v6
+
+ sysctl_restore net.ipv4.fib_multipath_hash_policy
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
new file mode 100755
index 000000000000..8fea2c2e0b25
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
@@ -0,0 +1,458 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test traffic distribution when there are multiple routes between the
+# endpoints of an IPv6 GRE tunnel. The tunnel carries IPv4 and IPv6 traffic
+# between multiple hosts, and the underlay network has multiple routes
+# between the tunnel endpoints. With the default multipath policy, SW2 only
+# looks at the outer IP addresses, so only a single route would be used.
+#
+# +--------------------------------+
+# | H1 |
+# | $h1 + |
+# | 198.51.100.{2-253}/24 | |
+# | 2001:db8:1::{2-fd}/64 | |
+# +-------------------------|------+
+# |
+# +-------------------------|-------------------+
+# | SW1 | |
+# | $ol1 + |
+# | 198.51.100.1/24 |
+# | 2001:db8:1::1/64 |
+# | |
+# | + g1 (ip6gre)
+# | loc=2001:db8:3::1 |
+# | rem=2001:db8:3::2 -. |
+# | tos=inherit | |
+# | v |
+# | + $ul1 |
+# | | 2001:db8:10::1/64 |
+# +---------------------|-----------------------+
+# |
+# +---------------------|-----------------------+
+# | SW2 | |
+# | $ul21 + |
+# | 2001:db8:10::2/64 | |
+# | | |
+# | __________________+___ |
+# | / \ |
+# | | | |
+# | + $ul22.111 (vlan) + $ul22.222 (vlan) |
+# | | 2001:db8:11::1/64 | 2001:db8:12::1/64 |
+# | | | |
+# +--|----------------------|-------------------+
+# | |
+# +--|----------------------|-------------------+
+# | | | |
+# | + $ul32.111 (vlan) + $ul32.222 (vlan) |
+# | | 2001:db8:11::2/64 | 2001:db8:12::2/64 |
+# | | | |
+# | \__________________+___/ |
+# | | |
+# | | |
+# | $ul31 + |
+# | 2001:db8:13::1/64 | SW3 |
+# +---------------------|-----------------------+
+# |
+# +---------------------|-----------------------+
+# | + $ul4 |
+# | ^ 2001:db8:13::2/64 |
+# | | |
+# | + g2 (ip6gre) | |
+# | loc=2001:db8:3::2 | |
+# | rem=2001:db8:3::1 -' |
+# | tos=inherit |
+# | |
+# | $ol4 + |
+# | 203.0.113.1/24 | |
+# | 2001:db8:2::1/64 | SW4 |
+# +-------------------------|-------------------+
+# |
+# +-------------------------|------+
+# | | |
+# | $h2 + |
+# | 203.0.113.{2-253}/24 |
+# | 2001:db8:2::{2-fd}/64 H2 |
+# +--------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ custom_hash
+"
+
+NUM_NETIFS=10
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 198.51.100.2/24 2001:db8:1::2/64
+ ip route add vrf v$h1 default via 198.51.100.1 dev $h1
+ ip -6 route add vrf v$h1 default via 2001:db8:1::1 dev $h1
+}
+
+h1_destroy()
+{
+ ip -6 route del vrf v$h1 default
+ ip route del vrf v$h1 default
+ simple_if_fini $h1 198.51.100.2/24 2001:db8:1::2/64
+}
+
+sw1_create()
+{
+ simple_if_init $ol1 198.51.100.1/24 2001:db8:1::1/64
+ __simple_if_init $ul1 v$ol1 2001:db8:10::1/64
+
+ tunnel_create g1 ip6gre 2001:db8:3::1 2001:db8:3::2 tos inherit \
+ dev v$ol1
+ __simple_if_init g1 v$ol1 2001:db8:3::1/128
+ ip route add vrf v$ol1 2001:db8:3::2/128 via 2001:db8:10::2
+
+ ip route add vrf v$ol1 203.0.113.0/24 dev g1
+ ip -6 route add vrf v$ol1 2001:db8:2::/64 dev g1
+}
+
+sw1_destroy()
+{
+ ip -6 route del vrf v$ol1 2001:db8:2::/64
+ ip route del vrf v$ol1 203.0.113.0/24
+
+ ip route del vrf v$ol1 2001:db8:3::2/128
+ __simple_if_fini g1 2001:db8:3::1/128
+ tunnel_destroy g1
+
+ __simple_if_fini $ul1 2001:db8:10::1/64
+ simple_if_fini $ol1 198.51.100.1/24 2001:db8:1::1/64
+}
+
+sw2_create()
+{
+ simple_if_init $ul21 2001:db8:10::2/64
+ __simple_if_init $ul22 v$ul21
+ vlan_create $ul22 111 v$ul21 2001:db8:11::1/64
+ vlan_create $ul22 222 v$ul21 2001:db8:12::1/64
+
+ ip -6 route add vrf v$ul21 2001:db8:3::1/128 via 2001:db8:10::1
+ ip -6 route add vrf v$ul21 2001:db8:3::2/128 \
+ nexthop via 2001:db8:11::2 \
+ nexthop via 2001:db8:12::2
+}
+
+sw2_destroy()
+{
+ ip -6 route del vrf v$ul21 2001:db8:3::2/128
+ ip -6 route del vrf v$ul21 2001:db8:3::1/128
+
+ vlan_destroy $ul22 222
+ vlan_destroy $ul22 111
+ __simple_if_fini $ul22
+ simple_if_fini $ul21 2001:db8:10::2/64
+}
+
+sw3_create()
+{
+ simple_if_init $ul31 2001:db8:13::1/64
+ __simple_if_init $ul32 v$ul31
+ vlan_create $ul32 111 v$ul31 2001:db8:11::2/64
+ vlan_create $ul32 222 v$ul31 2001:db8:12::2/64
+
+ ip -6 route add vrf v$ul31 2001:db8:3::2/128 via 2001:db8:13::2
+ ip -6 route add vrf v$ul31 2001:db8:3::1/128 \
+ nexthop via 2001:db8:11::1 \
+ nexthop via 2001:db8:12::1
+
+ tc qdisc add dev $ul32 clsact
+ tc filter add dev $ul32 ingress pref 111 prot 802.1Q \
+ flower vlan_id 111 action pass
+ tc filter add dev $ul32 ingress pref 222 prot 802.1Q \
+ flower vlan_id 222 action pass
+}
+
+sw3_destroy()
+{
+ tc qdisc del dev $ul32 clsact
+
+ ip -6 route del vrf v$ul31 2001:db8:3::1/128
+ ip -6 route del vrf v$ul31 2001:db8:3::2/128
+
+ vlan_destroy $ul32 222
+ vlan_destroy $ul32 111
+ __simple_if_fini $ul32
+ simple_if_fini $ul31 2001:db8:13::1/64
+}
+
+sw4_create()
+{
+ simple_if_init $ol4 203.0.113.1/24 2001:db8:2::1/64
+ __simple_if_init $ul4 v$ol4 2001:db8:13::2/64
+
+ tunnel_create g2 ip6gre 2001:db8:3::2 2001:db8:3::1 tos inherit \
+ dev v$ol4
+ __simple_if_init g2 v$ol4 2001:db8:3::2/128
+ ip -6 route add vrf v$ol4 2001:db8:3::1/128 via 2001:db8:13::1
+
+ ip route add vrf v$ol4 198.51.100.0/24 dev g2
+ ip -6 route add vrf v$ol4 2001:db8:1::/64 dev g2
+}
+
+sw4_destroy()
+{
+ ip -6 route del vrf v$ol4 2001:db8:1::/64
+ ip route del vrf v$ol4 198.51.100.0/24
+
+ ip -6 route del vrf v$ol4 2001:db8:3::1/128
+ __simple_if_fini g2 2001:db8:3::2/128
+ tunnel_destroy g2
+
+ __simple_if_fini $ul4 2001:db8:13::2/64
+ simple_if_fini $ol4 203.0.113.1/24 2001:db8:2::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 203.0.113.2/24 2001:db8:2::2/64
+ ip route add vrf v$h2 default via 203.0.113.1 dev $h2
+ ip -6 route add vrf v$h2 default via 2001:db8:2::1 dev $h2
+}
+
+h2_destroy()
+{
+ ip -6 route del vrf v$h2 default
+ ip route del vrf v$h2 default
+ simple_if_fini $h2 203.0.113.2/24 2001:db8:2::2/64
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+
+ ol1=${NETIFS[p2]}
+ ul1=${NETIFS[p3]}
+
+ ul21=${NETIFS[p4]}
+ ul22=${NETIFS[p5]}
+
+ ul32=${NETIFS[p6]}
+ ul31=${NETIFS[p7]}
+
+ ul4=${NETIFS[p8]}
+ ol4=${NETIFS[p9]}
+
+ h2=${NETIFS[p10]}
+
+ vrf_prepare
+ h1_create
+ sw1_create
+ sw2_create
+ sw3_create
+ sw4_create
+ h2_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ h2_destroy
+ sw4_destroy
+ sw3_destroy
+ sw2_destroy
+ sw1_destroy
+ h1_destroy
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 203.0.113.2
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:2::2
+}
+
+send_src_ipv4()
+{
+ $MZ $h1 -q -p 64 -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_dst_ipv4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_src_udp4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ -d 1msec -t udp "sp=0-32768,dp=30000"
+}
+
+send_dst_udp4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ -d 1msec -t udp "sp=20000,dp=0-32768"
+}
+
+send_src_ipv6()
+{
+ $MZ -6 $h1 -q -p 64 -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_dst_ipv6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_flowlabel()
+{
+ # Generate 16384 echo requests, each with a random flow label.
+ for _ in $(seq 1 16384); do
+ ip vrf exec v$h1 \
+ $PING6 2001:db8:2::2 -F 0 -c 1 -q >/dev/null 2>&1
+ done
+}
+
+send_src_udp6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ -d 1msec -t udp "sp=0-32768,dp=30000"
+}
+
+send_dst_udp6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ -d 1msec -t udp "sp=20000,dp=0-32768"
+}
+
+custom_hash_test()
+{
+ local field="$1"; shift
+ local balanced="$1"; shift
+ local send_flows="$@"
+
+ RET=0
+
+ local t0_111=$(tc_rule_stats_get $ul32 111 ingress)
+ local t0_222=$(tc_rule_stats_get $ul32 222 ingress)
+
+ $send_flows
+
+ local t1_111=$(tc_rule_stats_get $ul32 111 ingress)
+ local t1_222=$(tc_rule_stats_get $ul32 222 ingress)
+
+ local d111=$((t1_111 - t0_111))
+ local d222=$((t1_222 - t0_222))
+
+ local diff=$((d222 - d111))
+ local sum=$((d111 + d222))
+
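+	# Consider the distribution balanced if the two paths differ by at
+	# most 20% of the total number of packets sent.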
+ local pct=$(echo "$diff / $sum * 100" | bc -l)
+ local is_balanced=$(echo "-20 <= $pct && $pct <= 20" | bc)
+
+ [[ ( $is_balanced -eq 1 && $balanced == "balanced" ) ||
+ ( $is_balanced -eq 0 && $balanced == "unbalanced" ) ]]
+ check_err $? "Expected traffic to be $balanced, but it is not"
+
+ log_test "Multipath hash field: $field ($balanced)"
+ log_info "Packets sent on path1 / path2: $d111 / $d222"
+}
+
+custom_hash_v4()
+{
+ log_info "Running IPv4 overlay custom multipath hash tests"
+
+ # Prevent the neighbour table from overflowing, as different neighbour
+ # entries will be created on $ol4 when using different destination IPs.
+ sysctl_set net.ipv4.neigh.default.gc_thresh1 1024
+ sysctl_set net.ipv4.neigh.default.gc_thresh2 1024
+ sysctl_set net.ipv4.neigh.default.gc_thresh3 1024
+
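+	# The IPv6 sysctl is used on purpose: the multipath routes being
+	# exercised are the IPv6 routes of the underlay network.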
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0040
+ custom_hash_test "Inner source IP" "balanced" send_src_ipv4
+ custom_hash_test "Inner source IP" "unbalanced" send_dst_ipv4
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0080
+ custom_hash_test "Inner destination IP" "balanced" send_dst_ipv4
+ custom_hash_test "Inner destination IP" "unbalanced" send_src_ipv4
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0400
+ custom_hash_test "Inner source port" "balanced" send_src_udp4
+ custom_hash_test "Inner source port" "unbalanced" send_dst_udp4
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0800
+ custom_hash_test "Inner destination port" "balanced" send_dst_udp4
+ custom_hash_test "Inner destination port" "unbalanced" send_src_udp4
+
+ sysctl_restore net.ipv4.neigh.default.gc_thresh3
+ sysctl_restore net.ipv4.neigh.default.gc_thresh2
+ sysctl_restore net.ipv4.neigh.default.gc_thresh1
+}
+
+custom_hash_v6()
+{
+ log_info "Running IPv6 overlay custom multipath hash tests"
+
+ # Prevent the neighbour table from overflowing, as different neighbour
+ # entries will be created on $ol4 when using different destination IPs.
+ sysctl_set net.ipv6.neigh.default.gc_thresh1 1024
+ sysctl_set net.ipv6.neigh.default.gc_thresh2 1024
+ sysctl_set net.ipv6.neigh.default.gc_thresh3 1024
+
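+	# Inner hash field bits per Documentation/networking/ip-sysctl.rst:
+	# 0x0040 - inner source IP, 0x0080 - inner destination IP,
+	# 0x0200 - inner flow label, 0x0400 - inner source port,
+	# 0x0800 - inner destination port.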
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0040
+ custom_hash_test "Inner source IP" "balanced" send_src_ipv6
+ custom_hash_test "Inner source IP" "unbalanced" send_dst_ipv6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0080
+ custom_hash_test "Inner destination IP" "balanced" send_dst_ipv6
+ custom_hash_test "Inner destination IP" "unbalanced" send_src_ipv6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0200
+ custom_hash_test "Inner flowlabel" "balanced" send_flowlabel
+ custom_hash_test "Inner flowlabel" "unbalanced" send_src_ipv6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0400
+ custom_hash_test "Inner source port" "balanced" send_src_udp6
+ custom_hash_test "Inner source port" "unbalanced" send_dst_udp6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0800
+ custom_hash_test "Inner destination port" "balanced" send_dst_udp6
+ custom_hash_test "Inner destination port" "unbalanced" send_src_udp6
+
+ sysctl_restore net.ipv6.neigh.default.gc_thresh3
+ sysctl_restore net.ipv6.neigh.default.gc_thresh2
+ sysctl_restore net.ipv6.neigh.default.gc_thresh1
+}
+
+custom_hash()
+{
+ # Test that when the hash policy is set to custom, traffic is
+ # distributed only according to the fields set in the
+ # fib_multipath_hash_fields sysctl.
+ #
+	# Each time, set a different field and make sure traffic is
+	# distributed only when that field is changed in the packet stream.
+
+ sysctl_set net.ipv6.fib_multipath_hash_policy 3
+
+ custom_hash_v4
+ custom_hash_v6
+
+ sysctl_restore net.ipv6.fib_multipath_hash_policy
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/unicast_extensions.sh b/tools/testing/selftests/net/unicast_extensions.sh
index dbf0421986df..66354cdd5ce4 100755
--- a/tools/testing/selftests/net/unicast_extensions.sh
+++ b/tools/testing/selftests/net/unicast_extensions.sh
@@ -189,6 +189,15 @@ segmenttest 255.255.255.1 255.255.255.254 24 "assign and ping inside 255.255.255
route_test 240.5.6.7 240.5.6.1 255.1.2.1 255.1.2.3 24 "route between 240.5.6/24 and 255.1.2/24 (is allowed)"
route_test 0.200.6.7 0.200.38.1 245.99.101.1 245.99.200.111 16 "route between 0.200/16 and 245.99/16 (is allowed)"
#
+# Test support for lowest address ending in .0
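+# (i.e. the subnet's all-zeros host address, which Linux previously rejected)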
+segmenttest 5.10.15.20 5.10.15.0 24 "assign and ping lowest address (/24)"
+#
+# Test support for lowest address not ending in .0
+segmenttest 192.168.101.192 192.168.101.193 26 "assign and ping lowest address (/26)"
+#
+# Routing using lowest address as a gateway/endpoint
+route_test 192.168.42.1 192.168.42.0 9.8.7.6 9.8.7.0 24 "routing using lowest address"
+#
# ==============================================
# ==== TESTS THAT CURRENTLY EXPECT FAILURE =====
# ==============================================
@@ -202,14 +211,6 @@ segmenttest 255.255.255.1 255.255.255.255 16 "assigning 255.255.255.255 (is forb
# Currently Linux does not allow this, so this should fail too
segmenttest 127.99.4.5 127.99.4.6 16 "assign and ping inside 127/8 (is forbidden)"
#
-# Test support for lowest address
-# Currently Linux does not allow this, so this should fail too
-segmenttest 5.10.15.20 5.10.15.0 24 "assign and ping lowest address (is forbidden)"
-#
-# Routing using lowest address as a gateway/endpoint
-# Currently Linux does not allow this, so this should fail too
-route_test 192.168.42.1 192.168.42.0 9.8.7.6 9.8.7.0 24 "routing using lowest address (is forbidden)"
-#
# Test support for unicast use of class D
# Currently Linux does not allow this, so this should fail too
segmenttest 225.1.2.3 225.1.2.200 24 "assign and ping class D address (is forbidden)"