Diffstat (limited to 'tools/testing/selftests/bpf')
-rw-r--r--  tools/testing/selftests/bpf/.gitignore |   3
-rw-r--r--  tools/testing/selftests/bpf/Makefile |  81
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/attach_probe.c | 161
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c | 111
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c | 285
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c | 148
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/core_extern.c | 169
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/core_reloc.c |   4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cpu_mask.c |  78
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fentry_fexit.c | 101
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fentry_test.c |  69
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/mmap.c |  56
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/perf_buffer.c |  29
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/probe_user.c |   6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/rdonly_maps.c |  11
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/select_reuseport.c (renamed from tools/testing/selftests/bpf/test_select_reuseport.c) | 514
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/skb_ctx.c |   7
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/skeleton.c |  63
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c |  77
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c |  82
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_perf.c |  25
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___equiv_zero_sz_arr.c |   3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_zero_sz_arr.c |   3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___fixed_arr.c |   3
-rw-r--r--  tools/testing/selftests/bpf/progs/core_reloc_types.h |  39
-rw-r--r--  tools/testing/selftests/bpf/progs/test_attach_probe.c |  34
-rw-r--r--  tools/testing/selftests/bpf/progs/test_core_extern.c |  62
-rw-r--r--  tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c |   8
-rw-r--r--  tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c |   2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_skb_ctx.c |   6
-rw-r--r--  tools/testing/selftests/bpf/progs/test_skeleton.c |  46
-rw-r--r--  tools/testing/selftests/bpf/test_cgroup_attach.c | 571
-rw-r--r--  tools/testing/selftests/bpf/test_cpp.cpp |  10
-rw-r--r--  tools/testing/selftests/bpf/test_progs.h |   4
34 files changed, 1644 insertions(+), 1227 deletions(-)
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 419652458da4..b139b3d75ebb 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -21,7 +21,6 @@ test_lirc_mode2_user
get_cgroup_id_user
test_skb_cgroup_id_user
test_socket_cookie
-test_cgroup_attach
test_cgroup_storage
test_select_reuseport
test_flow_dissector
@@ -38,5 +37,7 @@ test_hashmap
test_btf_dump
xdping
test_cpp
+*.skel.h
/no_alu32
/bpf_gcc
+/tools
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index e0fe01d9ec33..41691fb067da 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -3,10 +3,12 @@ include ../../../../scripts/Kbuild.include
include ../../../scripts/Makefile.arch
CURDIR := $(abspath .)
-LIBDIR := $(abspath ../../../lib)
+TOOLSDIR := $(abspath ../../..)
+LIBDIR := $(TOOLSDIR)/lib
BPFDIR := $(LIBDIR)/bpf
-TOOLSDIR := $(abspath ../../../include)
-APIDIR := $(TOOLSDIR)/uapi
+TOOLSINCDIR := $(TOOLSDIR)/include
+BPFTOOLDIR := $(TOOLSDIR)/bpf/bpftool
+APIDIR := $(TOOLSINCDIR)/uapi
GENDIR := $(abspath ../../../../include/generated)
GENHDR := $(GENDIR)/autoconf.h
@@ -19,18 +21,18 @@ LLC ?= llc
LLVM_OBJCOPY ?= llvm-objcopy
BPF_GCC ?= $(shell command -v bpf-gcc;)
CFLAGS += -g -Wall -O2 $(GENFLAGS) -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) \
- -I$(GENDIR) -I$(TOOLSDIR) -I$(CURDIR) \
+ -I$(GENDIR) -I$(TOOLSINCDIR) -I$(CURDIR) \
-Dbpf_prog_load=bpf_prog_test_load \
-Dbpf_load_program=bpf_test_load_program
-LDLIBS += -lcap -lelf -lrt -lpthread
+LDLIBS += -lcap -lelf -lz -lrt -lpthread
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
- test_cgroup_storage test_select_reuseport \
+ test_cgroup_storage \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \
- test_cgroup_attach test_progs-no_alu32
+ test_progs-no_alu32
# Also test bpf-gcc, if present
ifneq ($(BPF_GCC),)
@@ -75,6 +77,24 @@ TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
TEST_CUSTOM_PROGS = urandom_read
+# Emit succinct information message describing current building step
+# $1 - generic step name (e.g., CC, LINK, etc);
+# $2 - optional "flavor" specifier; if provided, will be emitted as [flavor];
+# $3 - target (assumed to be file); only file name will be emitted;
+# $4 - optional extra arg, emitted as-is, if provided.
+ifeq ($(V),1)
+msg =
+else
+msg = @$(info $(1)$(if $(2), [$(2)]) $(notdir $(3)))$(if $(4), $(4))
+endif
+
+# override lib.mk's default rules
+OVERRIDE_TARGETS := 1
+override define CLEAN
+ $(call msg, CLEAN)
+ $(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
+endef
+
include ../lib.mk
# Define simple and short `make test_progs`, `make test_sysctl`, etc targets
@@ -87,10 +107,16 @@ $(notdir $(TEST_GEN_PROGS) \
$(TEST_GEN_PROGS_EXTENDED) \
$(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ;
+$(OUTPUT)/%:%.c
+ $(call msg, BINARY,,$@)
+ $(LINK.c) $^ $(LDLIBS) -o $@
+
$(OUTPUT)/urandom_read: urandom_read.c
+ $(call msg, BINARY,,$@)
$(CC) -o $@ $< -Wl,--build-id
$(OUTPUT)/test_stub.o: test_stub.c
+ $(call msg, CC,,$@)
$(CC) -c $(CFLAGS) -o $@ $<
BPFOBJ := $(OUTPUT)/libbpf.a
@@ -110,13 +136,18 @@ $(OUTPUT)/test_cgroup_storage: cgroup_helpers.c
$(OUTPUT)/test_netcnt: cgroup_helpers.c
$(OUTPUT)/test_sock_fields: cgroup_helpers.c
$(OUTPUT)/test_sysctl: cgroup_helpers.c
-$(OUTPUT)/test_cgroup_attach: cgroup_helpers.c
.PHONY: force
# force a rebuild of BPFOBJ when its dependencies are updated
force:
+DEFAULT_BPFTOOL := $(OUTPUT)/tools/usr/local/sbin/bpftool
+BPFTOOL ?= $(DEFAULT_BPFTOOL)
+
+$(DEFAULT_BPFTOOL): force
+ $(MAKE) -C $(BPFTOOLDIR) DESTDIR=$(OUTPUT)/tools install
+
$(BPFOBJ): force
$(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
@@ -159,27 +190,33 @@ $(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
# $3 - CFLAGS
# $4 - LDFLAGS
define CLANG_BPF_BUILD_RULE
+ $(call msg, CLANG-LLC,$(TRUNNER_BINARY),$2)
($(CLANG) $3 -O2 -target bpf -emit-llvm \
-c $1 -o - || echo "BPF obj compilation failed") | \
$(LLC) -mattr=dwarfris -march=bpf -mcpu=probe $4 -filetype=obj -o $2
endef
# Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
define CLANG_NOALU32_BPF_BUILD_RULE
+ $(call msg, CLANG-LLC,$(TRUNNER_BINARY),$2)
($(CLANG) $3 -O2 -target bpf -emit-llvm \
-c $1 -o - || echo "BPF obj compilation failed") | \
$(LLC) -march=bpf -mcpu=v2 $4 -filetype=obj -o $2
endef
# Similar to CLANG_BPF_BUILD_RULE, but using native Clang and bpf LLC
define CLANG_NATIVE_BPF_BUILD_RULE
+ $(call msg, CLANG-BPF,$(TRUNNER_BINARY),$2)
($(CLANG) $3 -O2 -emit-llvm \
-c $1 -o - || echo "BPF obj compilation failed") | \
$(LLC) -march=bpf -mcpu=probe $4 -filetype=obj -o $2
endef
# Build BPF object using GCC
define GCC_BPF_BUILD_RULE
+ $(call msg, GCC-BPF,$(TRUNNER_BINARY),$2)
$(BPF_GCC) $3 $4 -O2 -c $1 -o $2
endef
+SKEL_BLACKLIST := btf__% test_pinning_invalid.c
+
# Set up extra TRUNNER_XXX "temporary" variables in the environment (relies on
# $eval()) and pass control to DEFINE_TEST_RUNNER_RULES.
# Parameters:
@@ -195,8 +232,11 @@ TRUNNER_EXTRA_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, \
$$(filter %.c,$(TRUNNER_EXTRA_SOURCES)))
TRUNNER_EXTRA_HDRS := $$(filter %.h,$(TRUNNER_EXTRA_SOURCES))
TRUNNER_TESTS_HDR := $(TRUNNER_TESTS_DIR)/tests.h
-TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, \
- $$(notdir $$(wildcard $(TRUNNER_BPF_PROGS_DIR)/*.c)))
+TRUNNER_BPF_SRCS := $$(notdir $$(wildcard $(TRUNNER_BPF_PROGS_DIR)/*.c))
+TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, $$(TRUNNER_BPF_SRCS))
+TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h, \
+ $$(filter-out $(SKEL_BLACKLIST), \
+ $$(TRUNNER_BPF_SRCS)))
# Evaluate rules now with extra TRUNNER_XXX variables above already defined
$$(eval $$(call DEFINE_TEST_RUNNER_RULES,$1,$2))
@@ -226,12 +266,19 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
$(TRUNNER_BPF_CFLAGS), \
$(TRUNNER_BPF_LDFLAGS))
+
+$(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h: \
+ $(TRUNNER_OUTPUT)/%.o \
+ | $(BPFTOOL) $(TRUNNER_OUTPUT)
+ $$(call msg, GEN-SKEL,$(TRUNNER_BINARY),$$@)
+ $$(BPFTOOL) gen skeleton $$< > $$@
endif
# ensure we set up tests.h header generation rule just once
ifeq ($($(TRUNNER_TESTS_DIR)-tests-hdr),)
$(TRUNNER_TESTS_DIR)-tests-hdr := y
$(TRUNNER_TESTS_HDR): $(TRUNNER_TESTS_DIR)/*.c
+ $$(call msg, TEST-HDR,$(TRUNNER_BINARY),$$@)
$$(shell ( cd $(TRUNNER_TESTS_DIR); \
echo '/* Generated header, do not edit */'; \
ls *.c 2> /dev/null | \
@@ -245,7 +292,9 @@ $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \
$(TRUNNER_TESTS_DIR)/%.c \
$(TRUNNER_EXTRA_HDRS) \
$(TRUNNER_BPF_OBJS) \
+ $(TRUNNER_BPF_SKELS) \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
+ $$(call msg, TEST-OBJ,$(TRUNNER_BINARY),$$@)
cd $$(@D) && $$(CC) $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
$(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \
@@ -253,17 +302,20 @@ $(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$(TRUNNER_EXTRA_HDRS) \
$(TRUNNER_TESTS_HDR) \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
+ $$(call msg, EXTRA-OBJ,$(TRUNNER_BINARY),$$@)
$$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@
+# only copy extra resources if in flavored build
$(TRUNNER_BINARY)-extras: $(TRUNNER_EXTRA_FILES) | $(TRUNNER_OUTPUT)
ifneq ($2,)
- # only copy extra resources if in flavored build
+ $$(call msg, EXTRAS-CP,$(TRUNNER_BINARY),$(TRUNNER_EXTRA_FILES))
cp -a $$^ $(TRUNNER_OUTPUT)/
endif
$(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
$(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \
| $(TRUNNER_BINARY)-extras
+ $$(call msg, BINARY,,$$@)
$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
endef
@@ -315,12 +367,15 @@ verifier/tests.h: verifier/*.c
echo '#endif' \
) > verifier/tests.h)
$(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT)
+ $(call msg, BINARY,,$@)
$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
# Make sure we are able to include and link libbpf against c++.
-$(OUTPUT)/test_cpp: test_cpp.cpp $(BPFOBJ)
+$(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
+ $(call msg, CXX,,$@)
$(CXX) $(CFLAGS) $^ $(LDLIBS) -o $@
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
- feature $(OUTPUT)/*.o $(OUTPUT)/no_alu32 $(OUTPUT)/bpf_gcc
+ feature $(OUTPUT)/*.o $(OUTPUT)/no_alu32 $(OUTPUT)/bpf_gcc \
+ tools *.skel.h
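The new GEN-SKEL rule above runs `bpftool gen skeleton` on every eligible BPF object and writes a per-object *.skel.h header, which the converted tests below include instead of opening the .o file by hand. A rough sketch of what such a generated header provides, using test_attach_probe as the example (illustrative only, not the literal bpftool output; the real header also embeds the object bytes and map handles):

    /* Approximate shape of test_attach_probe.skel.h (sketch, not the generated file). */
    struct test_attach_probe {
            struct bpf_object_skeleton *skeleton;
            struct bpf_object *obj;
            struct {
                    struct bpf_program *handle_kprobe;
                    struct bpf_program *handle_kretprobe;
                    struct bpf_program *handle_uprobe;
                    struct bpf_program *handle_uretprobe;
            } progs;
            struct {
                    struct bpf_link *handle_kprobe;
                    struct bpf_link *handle_kretprobe;
                    struct bpf_link *handle_uprobe;
                    struct bpf_link *handle_uretprobe;
            } links;
            struct test_attach_probe__bss {
                    int kprobe_res;
                    int kretprobe_res;
                    int uprobe_res;
                    int uretprobe_res;
            } *bss;
    };

    struct test_attach_probe *test_attach_probe__open_and_load(void);
    int test_attach_probe__attach(struct test_attach_probe *skel);
    void test_attach_probe__destroy(struct test_attach_probe *skel);

A test then simply calls test_attach_probe__open_and_load(), attaches either via the generated __attach() or via bpf_program__attach_*() on skel->progs.*, reads results directly from skel->bss, and finishes with test_attach_probe__destroy(), as the attach_probe conversion below shows.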
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index a83111a32d4a..a0ee87c8e1ea 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -1,26 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
-
-#define EMBED_FILE(NAME, PATH) \
-asm ( \
-" .pushsection \".rodata\", \"a\", @progbits \n" \
-" .global "#NAME"_data \n" \
-#NAME"_data: \n" \
-" .incbin \"" PATH "\" \n" \
-#NAME"_data_end: \n" \
-" .global "#NAME"_size \n" \
-" .type "#NAME"_size, @object \n" \
-" .size "#NAME"_size, 4 \n" \
-" .align 4, \n" \
-#NAME"_size: \n" \
-" .int "#NAME"_data_end - "#NAME"_data \n" \
-" .popsection \n" \
-); \
-extern char NAME##_data[]; \
-extern int NAME##_size;
+#include "test_attach_probe.skel.h"
ssize_t get_base_addr() {
- size_t start;
+ size_t start, offset;
char buf[256];
FILE *f;
@@ -28,10 +11,11 @@ ssize_t get_base_addr() {
if (!f)
return -errno;
- while (fscanf(f, "%zx-%*x %s %*s\n", &start, buf) == 2) {
+ while (fscanf(f, "%zx-%*x %s %zx %*[^\n]\n",
+ &start, buf, &offset) == 3) {
if (strcmp(buf, "r-xp") == 0) {
fclose(f);
- return start;
+ return start - offset;
}
}
@@ -39,30 +23,12 @@ ssize_t get_base_addr() {
return -EINVAL;
}
-EMBED_FILE(probe, "test_attach_probe.o");
-
void test_attach_probe(void)
{
- const char *kprobe_name = "kprobe/sys_nanosleep";
- const char *kretprobe_name = "kretprobe/sys_nanosleep";
- const char *uprobe_name = "uprobe/trigger_func";
- const char *uretprobe_name = "uretprobe/trigger_func";
- const int kprobe_idx = 0, kretprobe_idx = 1;
- const int uprobe_idx = 2, uretprobe_idx = 3;
- const char *obj_name = "attach_probe";
- DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
- .object_name = obj_name,
- .relaxed_maps = true,
- );
- struct bpf_program *kprobe_prog, *kretprobe_prog;
- struct bpf_program *uprobe_prog, *uretprobe_prog;
- struct bpf_object *obj;
- int err, duration = 0, res;
- struct bpf_link *kprobe_link = NULL;
- struct bpf_link *kretprobe_link = NULL;
- struct bpf_link *uprobe_link = NULL;
- struct bpf_link *uretprobe_link = NULL;
- int results_map_fd;
+ int duration = 0;
+ struct bpf_link *kprobe_link, *kretprobe_link;
+ struct bpf_link *uprobe_link, *uretprobe_link;
+ struct test_attach_probe* skel;
size_t uprobe_offset;
ssize_t base_addr;
@@ -72,123 +38,68 @@ void test_attach_probe(void)
return;
uprobe_offset = (size_t)&get_base_addr - base_addr;
- /* open object */
- obj = bpf_object__open_mem(probe_data, probe_size, &open_opts);
- if (CHECK(IS_ERR(obj), "obj_open_mem", "err %ld\n", PTR_ERR(obj)))
+ skel = test_attach_probe__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
-
- if (CHECK(strcmp(bpf_object__name(obj), obj_name), "obj_name",
- "wrong obj name '%s', expected '%s'\n",
- bpf_object__name(obj), obj_name))
+ if (CHECK(!skel->bss, "check_bss", ".bss wasn't mmap()-ed\n"))
goto cleanup;
- kprobe_prog = bpf_object__find_program_by_title(obj, kprobe_name);
- if (CHECK(!kprobe_prog, "find_probe",
- "prog '%s' not found\n", kprobe_name))
- goto cleanup;
- kretprobe_prog = bpf_object__find_program_by_title(obj, kretprobe_name);
- if (CHECK(!kretprobe_prog, "find_probe",
- "prog '%s' not found\n", kretprobe_name))
- goto cleanup;
- uprobe_prog = bpf_object__find_program_by_title(obj, uprobe_name);
- if (CHECK(!uprobe_prog, "find_probe",
- "prog '%s' not found\n", uprobe_name))
- goto cleanup;
- uretprobe_prog = bpf_object__find_program_by_title(obj, uretprobe_name);
- if (CHECK(!uretprobe_prog, "find_probe",
- "prog '%s' not found\n", uretprobe_name))
- goto cleanup;
-
- /* create maps && load programs */
- err = bpf_object__load(obj);
- if (CHECK(err, "obj_load", "err %d\n", err))
- goto cleanup;
-
- /* load maps */
- results_map_fd = bpf_find_map(__func__, obj, "results_map");
- if (CHECK(results_map_fd < 0, "find_results_map",
- "err %d\n", results_map_fd))
- goto cleanup;
-
- kprobe_link = bpf_program__attach_kprobe(kprobe_prog,
+ kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe,
false /* retprobe */,
SYS_NANOSLEEP_KPROBE_NAME);
if (CHECK(IS_ERR(kprobe_link), "attach_kprobe",
- "err %ld\n", PTR_ERR(kprobe_link))) {
- kprobe_link = NULL;
+ "err %ld\n", PTR_ERR(kprobe_link)))
goto cleanup;
- }
- kretprobe_link = bpf_program__attach_kprobe(kretprobe_prog,
+ skel->links.handle_kprobe = kprobe_link;
+
+ kretprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kretprobe,
true /* retprobe */,
SYS_NANOSLEEP_KPROBE_NAME);
if (CHECK(IS_ERR(kretprobe_link), "attach_kretprobe",
- "err %ld\n", PTR_ERR(kretprobe_link))) {
- kretprobe_link = NULL;
+ "err %ld\n", PTR_ERR(kretprobe_link)))
goto cleanup;
- }
- uprobe_link = bpf_program__attach_uprobe(uprobe_prog,
+ skel->links.handle_kretprobe = kretprobe_link;
+
+ uprobe_link = bpf_program__attach_uprobe(skel->progs.handle_uprobe,
false /* retprobe */,
0 /* self pid */,
"/proc/self/exe",
uprobe_offset);
if (CHECK(IS_ERR(uprobe_link), "attach_uprobe",
- "err %ld\n", PTR_ERR(uprobe_link))) {
- uprobe_link = NULL;
+ "err %ld\n", PTR_ERR(uprobe_link)))
goto cleanup;
- }
- uretprobe_link = bpf_program__attach_uprobe(uretprobe_prog,
+ skel->links.handle_uprobe = uprobe_link;
+
+ uretprobe_link = bpf_program__attach_uprobe(skel->progs.handle_uretprobe,
true /* retprobe */,
-1 /* any pid */,
"/proc/self/exe",
uprobe_offset);
if (CHECK(IS_ERR(uretprobe_link), "attach_uretprobe",
- "err %ld\n", PTR_ERR(uretprobe_link))) {
- uretprobe_link = NULL;
+ "err %ld\n", PTR_ERR(uretprobe_link)))
goto cleanup;
- }
+ skel->links.handle_uretprobe = uretprobe_link;
/* trigger & validate kprobe && kretprobe */
usleep(1);
- err = bpf_map_lookup_elem(results_map_fd, &kprobe_idx, &res);
- if (CHECK(err, "get_kprobe_res",
- "failed to get kprobe res: %d\n", err))
+ if (CHECK(skel->bss->kprobe_res != 1, "check_kprobe_res",
+ "wrong kprobe res: %d\n", skel->bss->kprobe_res))
goto cleanup;
- if (CHECK(res != kprobe_idx + 1, "check_kprobe_res",
- "wrong kprobe res: %d\n", res))
- goto cleanup;
-
- err = bpf_map_lookup_elem(results_map_fd, &kretprobe_idx, &res);
- if (CHECK(err, "get_kretprobe_res",
- "failed to get kretprobe res: %d\n", err))
- goto cleanup;
- if (CHECK(res != kretprobe_idx + 1, "check_kretprobe_res",
- "wrong kretprobe res: %d\n", res))
+ if (CHECK(skel->bss->kretprobe_res != 2, "check_kretprobe_res",
+ "wrong kretprobe res: %d\n", skel->bss->kretprobe_res))
goto cleanup;
/* trigger & validate uprobe & uretprobe */
get_base_addr();
- err = bpf_map_lookup_elem(results_map_fd, &uprobe_idx, &res);
- if (CHECK(err, "get_uprobe_res",
- "failed to get uprobe res: %d\n", err))
- goto cleanup;
- if (CHECK(res != uprobe_idx + 1, "check_uprobe_res",
- "wrong uprobe res: %d\n", res))
- goto cleanup;
-
- err = bpf_map_lookup_elem(results_map_fd, &uretprobe_idx, &res);
- if (CHECK(err, "get_uretprobe_res",
- "failed to get uretprobe res: %d\n", err))
+ if (CHECK(skel->bss->uprobe_res != 3, "check_uprobe_res",
+ "wrong uprobe res: %d\n", skel->bss->uprobe_res))
goto cleanup;
- if (CHECK(res != uretprobe_idx + 1, "check_uretprobe_res",
- "wrong uretprobe res: %d\n", res))
+ if (CHECK(skel->bss->uretprobe_res != 4, "check_uretprobe_res",
+ "wrong uretprobe res: %d\n", skel->bss->uretprobe_res))
goto cleanup;
cleanup:
- bpf_link__destroy(kprobe_link);
- bpf_link__destroy(kretprobe_link);
- bpf_link__destroy(uprobe_link);
- bpf_link__destroy(uretprobe_link);
- bpf_object__close(obj);
+ test_attach_probe__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
new file mode 100644
index 000000000000..5b13f2c6c402
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include "cgroup_helpers.h"
+
+#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
+
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
+static int prog_load(void)
+{
+ struct bpf_insn prog[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = 1 */
+ BPF_EXIT_INSN(),
+ };
+ size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+
+ return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
+ prog, insns_cnt, "GPL", 0,
+ bpf_log_buf, BPF_LOG_BUF_SIZE);
+}
+
+void test_cgroup_attach_autodetach(void)
+{
+ __u32 duration = 0, prog_cnt = 4, attach_flags;
+ int allow_prog[2] = {-1};
+ __u32 prog_ids[2] = {0};
+ void *ptr = NULL;
+ int cg = 0, i;
+ int attempts;
+
+ for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
+ allow_prog[i] = prog_load();
+ if (CHECK(allow_prog[i] < 0, "prog_load",
+ "verifier output:\n%s\n-------\n", bpf_log_buf))
+ goto err;
+ }
+
+ if (CHECK_FAIL(setup_cgroup_environment()))
+ goto err;
+
+ /* create a cgroup, attach two programs and remember their ids */
+ cg = create_and_get_cgroup("/cg_autodetach");
+ if (CHECK_FAIL(cg < 0))
+ goto err;
+
+ if (CHECK_FAIL(join_cgroup("/cg_autodetach")))
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(allow_prog); i++)
+ if (CHECK(bpf_prog_attach(allow_prog[i], cg,
+ BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_MULTI),
+ "prog_attach", "prog[%d], errno=%d\n", i, errno))
+ goto err;
+
+ /* make sure that programs are attached and run some traffic */
+ if (CHECK(bpf_prog_query(cg, BPF_CGROUP_INET_EGRESS, 0, &attach_flags,
+ prog_ids, &prog_cnt),
+ "prog_query", "errno=%d\n", errno))
+ goto err;
+ if (CHECK_FAIL(system(PING_CMD)))
+ goto err;
+
+ /* allocate some memory (4Mb) to pin the original cgroup */
+ ptr = malloc(4 * (1 << 20));
+ if (CHECK_FAIL(!ptr))
+ goto err;
+
+ /* close programs and cgroup fd */
+ for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
+ close(allow_prog[i]);
+ allow_prog[i] = -1;
+ }
+
+ close(cg);
+ cg = 0;
+
+ /* leave the cgroup and remove it. don't detach programs */
+ cleanup_cgroup_environment();
+
+ /* wait for the asynchronous auto-detachment.
+ * wait for no more than 5 sec and give up.
+ */
+ for (i = 0; i < ARRAY_SIZE(prog_ids); i++) {
+ for (attempts = 5; attempts >= 0; attempts--) {
+ int fd = bpf_prog_get_fd_by_id(prog_ids[i]);
+
+ if (fd < 0)
+ break;
+
+ /* don't leave the fd open */
+ close(fd);
+
+ if (CHECK_FAIL(!attempts))
+ goto err;
+
+ sleep(1);
+ }
+ }
+
+err:
+ for (i = 0; i < ARRAY_SIZE(allow_prog); i++)
+ if (allow_prog[i] >= 0)
+ close(allow_prog[i]);
+ if (cg)
+ close(cg);
+ free(ptr);
+ cleanup_cgroup_environment();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
new file mode 100644
index 000000000000..2ff21dbce179
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include "cgroup_helpers.h"
+
+#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
+
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
+static int map_fd = -1;
+
+static int prog_load_cnt(int verdict, int val)
+{
+ int cgroup_storage_fd, percpu_cgroup_storage_fd;
+
+ if (map_fd < 0)
+ map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
+ if (map_fd < 0) {
+ printf("failed to create map '%s'\n", strerror(errno));
+ return -1;
+ }
+
+ cgroup_storage_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE,
+ sizeof(struct bpf_cgroup_storage_key), 8, 0, 0);
+ if (cgroup_storage_fd < 0) {
+ printf("failed to create map '%s'\n", strerror(errno));
+ return -1;
+ }
+
+ percpu_cgroup_storage_fd = bpf_create_map(
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+ sizeof(struct bpf_cgroup_storage_key), 8, 0, 0);
+ if (percpu_cgroup_storage_fd < 0) {
+ printf("failed to create map '%s'\n", strerror(errno));
+ return -1;
+ }
+
+ struct bpf_insn prog[] = {
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
+ BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */
+ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+
+ BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_MOV64_IMM(BPF_REG_1, val),
+ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),
+
+ BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
+ BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
+
+ BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
+ BPF_EXIT_INSN(),
+ };
+ size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+ int ret;
+
+ ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
+ prog, insns_cnt, "GPL", 0,
+ bpf_log_buf, BPF_LOG_BUF_SIZE);
+
+ close(cgroup_storage_fd);
+ return ret;
+}
+
+void test_cgroup_attach_multi(void)
+{
+ __u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id;
+ int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0;
+ DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, attach_opts);
+ int allow_prog[7] = {-1};
+ unsigned long long value;
+ __u32 duration = 0;
+ int i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
+ allow_prog[i] = prog_load_cnt(1, 1 << i);
+ if (CHECK(allow_prog[i] < 0, "prog_load",
+ "verifier output:\n%s\n-------\n", bpf_log_buf))
+ goto err;
+ }
+
+ if (CHECK_FAIL(setup_cgroup_environment()))
+ goto err;
+
+ cg1 = create_and_get_cgroup("/cg1");
+ if (CHECK_FAIL(cg1 < 0))
+ goto err;
+ cg2 = create_and_get_cgroup("/cg1/cg2");
+ if (CHECK_FAIL(cg2 < 0))
+ goto err;
+ cg3 = create_and_get_cgroup("/cg1/cg2/cg3");
+ if (CHECK_FAIL(cg3 < 0))
+ goto err;
+ cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4");
+ if (CHECK_FAIL(cg4 < 0))
+ goto err;
+ cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5");
+ if (CHECK_FAIL(cg5 < 0))
+ goto err;
+
+ if (CHECK_FAIL(join_cgroup("/cg1/cg2/cg3/cg4/cg5")))
+ goto err;
+
+ if (CHECK(bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_MULTI),
+ "prog0_attach_to_cg1_multi", "errno=%d\n", errno))
+ goto err;
+
+ if (CHECK(!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_MULTI),
+ "fail_same_prog_attach_to_cg1", "unexpected success\n"))
+ goto err;
+
+ if (CHECK(bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_MULTI),
+ "prog1_attach_to_cg1_multi", "errno=%d\n", errno))
+ goto err;
+
+ if (CHECK(bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE),
+ "prog2_attach_to_cg2_override", "errno=%d\n", errno))
+ goto err;
+
+ if (CHECK(bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_MULTI),
+ "prog3_attach_to_cg3_multi", "errno=%d\n", errno))
+ goto err;
+
+ if (CHECK(bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE),
+ "prog4_attach_to_cg4_override", "errno=%d\n", errno))
+ goto err;
+
+ if (CHECK(bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0),
+ "prog5_attach_to_cg5_none", "errno=%d\n", errno))
+ goto err;
+
+ CHECK_FAIL(system(PING_CMD));
+ CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
+ CHECK_FAIL(value != 1 + 2 + 8 + 32);
+
+ /* query the number of effective progs in cg5 */
+ CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
+ BPF_F_QUERY_EFFECTIVE, NULL, NULL, &prog_cnt));
+ CHECK_FAIL(prog_cnt != 4);
+ /* retrieve prog_ids of effective progs in cg5 */
+ CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
+ BPF_F_QUERY_EFFECTIVE, &attach_flags,
+ prog_ids, &prog_cnt));
+ CHECK_FAIL(prog_cnt != 4);
+ CHECK_FAIL(attach_flags != 0);
+ saved_prog_id = prog_ids[0];
+ /* check enospc handling */
+ prog_ids[0] = 0;
+ prog_cnt = 2;
+ CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
+ BPF_F_QUERY_EFFECTIVE, &attach_flags,
+ prog_ids, &prog_cnt) != -1);
+ CHECK_FAIL(errno != ENOSPC);
+ CHECK_FAIL(prog_cnt != 4);
+ /* check that prog_ids are returned even when buffer is too small */
+ CHECK_FAIL(prog_ids[0] != saved_prog_id);
+ /* retrieve prog_id of single attached prog in cg5 */
+ prog_ids[0] = 0;
+ CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL,
+ prog_ids, &prog_cnt));
+ CHECK_FAIL(prog_cnt != 1);
+ CHECK_FAIL(prog_ids[0] != saved_prog_id);
+
+ /* detach bottom program and ping again */
+ if (CHECK(bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS),
+ "prog_detach_from_cg5", "errno=%d\n", errno))
+ goto err;
+
+ value = 0;
+ CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
+ CHECK_FAIL(system(PING_CMD));
+ CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
+ CHECK_FAIL(value != 1 + 2 + 8 + 16);
+
+ /* test replace */
+
+ attach_opts.flags = BPF_F_ALLOW_OVERRIDE | BPF_F_REPLACE;
+ attach_opts.replace_prog_fd = allow_prog[0];
+ if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
+ BPF_CGROUP_INET_EGRESS, &attach_opts),
+ "fail_prog_replace_override", "unexpected success\n"))
+ goto err;
+ CHECK_FAIL(errno != EINVAL);
+
+ attach_opts.flags = BPF_F_REPLACE;
+ if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
+ BPF_CGROUP_INET_EGRESS, &attach_opts),
+ "fail_prog_replace_no_multi", "unexpected success\n"))
+ goto err;
+ CHECK_FAIL(errno != EINVAL);
+
+ attach_opts.flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;
+ attach_opts.replace_prog_fd = -1;
+ if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
+ BPF_CGROUP_INET_EGRESS, &attach_opts),
+ "fail_prog_replace_bad_fd", "unexpected success\n"))
+ goto err;
+ CHECK_FAIL(errno != EBADF);
+
+ /* replacing a program that is not attached to cgroup should fail */
+ attach_opts.replace_prog_fd = allow_prog[3];
+ if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
+ BPF_CGROUP_INET_EGRESS, &attach_opts),
+ "fail_prog_replace_no_ent", "unexpected success\n"))
+ goto err;
+ CHECK_FAIL(errno != ENOENT);
+
+ /* replace 1st from the top program */
+ attach_opts.replace_prog_fd = allow_prog[0];
+ if (CHECK(bpf_prog_attach_xattr(allow_prog[6], cg1,
+ BPF_CGROUP_INET_EGRESS, &attach_opts),
+ "prog_replace", "errno=%d\n", errno))
+ goto err;
+
+ value = 0;
+ CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
+ CHECK_FAIL(system(PING_CMD));
+ CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
+ CHECK_FAIL(value != 64 + 2 + 8 + 16);
+
+ /* detach 3rd from bottom program and ping again */
+ if (CHECK(!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS),
+ "fail_prog_detach_from_cg3", "unexpected success\n"))
+ goto err;
+
+ if (CHECK(bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS),
+ "prog3_detach_from_cg3", "errno=%d\n", errno))
+ goto err;
+
+ value = 0;
+ CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
+ CHECK_FAIL(system(PING_CMD));
+ CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
+ CHECK_FAIL(value != 64 + 2 + 16);
+
+ /* detach 2nd from bottom program and ping again */
+ if (CHECK(bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS),
+ "prog_detach_from_cg4", "errno=%d\n", errno))
+ goto err;
+
+ value = 0;
+ CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
+ CHECK_FAIL(system(PING_CMD));
+ CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
+ CHECK_FAIL(value != 64 + 2 + 4);
+
+ prog_cnt = 4;
+ CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
+ BPF_F_QUERY_EFFECTIVE, &attach_flags,
+ prog_ids, &prog_cnt));
+ CHECK_FAIL(prog_cnt != 3);
+ CHECK_FAIL(attach_flags != 0);
+ CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL,
+ prog_ids, &prog_cnt));
+ CHECK_FAIL(prog_cnt != 0);
+
+err:
+ for (i = 0; i < ARRAY_SIZE(allow_prog); i++)
+ if (allow_prog[i] >= 0)
+ close(allow_prog[i]);
+ close(cg1);
+ close(cg2);
+ close(cg3);
+ close(cg4);
+ close(cg5);
+ cleanup_cgroup_environment();
+}
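A note on the expected counter values in the test above (a reading of the test logic, not something stated in the patch): each attached program increments the shared array element by 1 << i, so every sum encodes which programs were effective for traffic from cg5 at that point. 1 + 2 + 8 + 32 means progs 0, 1, 3 and 5 ran, the override programs on cg2 and cg4 being shadowed further down the hierarchy; after detaching from cg5 the sum 1 + 2 + 8 + 16 shows prog 4's override taking effect; after the replace step prog 6 contributes 64 in place of prog 0; and once cg4's program is detached, 64 + 2 + 4 shows cg2's override (prog 2) becoming effective again.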
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
new file mode 100644
index 000000000000..9d8cb48b99de
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include "cgroup_helpers.h"
+
+#define FOO "/foo"
+#define BAR "/foo/bar/"
+#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
+
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
+static int prog_load(int verdict)
+{
+ struct bpf_insn prog[] = {
+ BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
+ BPF_EXIT_INSN(),
+ };
+ size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+
+ return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
+ prog, insns_cnt, "GPL", 0,
+ bpf_log_buf, BPF_LOG_BUF_SIZE);
+}
+
+void test_cgroup_attach_override(void)
+{
+ int drop_prog = -1, allow_prog = -1, foo = -1, bar = -1;
+ __u32 duration = 0;
+
+ allow_prog = prog_load(1);
+ if (CHECK(allow_prog < 0, "prog_load_allow",
+ "verifier output:\n%s\n-------\n", bpf_log_buf))
+ goto err;
+
+ drop_prog = prog_load(0);
+ if (CHECK(drop_prog < 0, "prog_load_drop",
+ "verifier output:\n%s\n-------\n", bpf_log_buf))
+ goto err;
+
+ foo = test__join_cgroup(FOO);
+ if (CHECK(foo < 0, "cgroup_join_foo", "cgroup setup failed\n"))
+ goto err;
+
+ if (CHECK(bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE),
+ "prog_attach_drop_foo_override",
+ "attach prog to %s failed, errno=%d\n", FOO, errno))
+ goto err;
+
+ if (CHECK(!system(PING_CMD), "ping_fail",
+ "ping unexpectedly succeeded\n"))
+ goto err;
+
+ bar = test__join_cgroup(BAR);
+ if (CHECK(bar < 0, "cgroup_join_bar", "cgroup setup failed\n"))
+ goto err;
+
+ if (CHECK(!system(PING_CMD), "ping_fail",
+ "ping unexpectedly succeeded\n"))
+ goto err;
+
+ if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE),
+ "prog_attach_allow_bar_override",
+ "attach prog to %s failed, errno=%d\n", BAR, errno))
+ goto err;
+
+ if (CHECK(system(PING_CMD), "ping_ok", "ping failed\n"))
+ goto err;
+
+ if (CHECK(bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS),
+ "prog_detach_bar",
+ "detach prog from %s failed, errno=%d\n", BAR, errno))
+ goto err;
+
+ if (CHECK(!system(PING_CMD), "ping_fail",
+ "ping unexpectedly succeeded\n"))
+ goto err;
+
+ if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE),
+ "prog_attach_allow_bar_override",
+ "attach prog to %s failed, errno=%d\n", BAR, errno))
+ goto err;
+
+ if (CHECK(bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS),
+ "prog_detach_foo",
+ "detach prog from %s failed, errno=%d\n", FOO, errno))
+ goto err;
+
+ if (CHECK(system(PING_CMD), "ping_ok", "ping failed\n"))
+ goto err;
+
+ if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE),
+ "prog_attach_allow_bar_override",
+ "attach prog to %s failed, errno=%d\n", BAR, errno))
+ goto err;
+
+ if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0),
+ "fail_prog_attach_allow_bar_none",
+ "attach prog to %s unexpectedly succeeded\n", BAR))
+ goto err;
+
+ if (CHECK(bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS),
+ "prog_detach_bar",
+ "detach prog from %s failed, errno=%d\n", BAR, errno))
+ goto err;
+
+ if (CHECK(!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS),
+ "fail_prog_detach_foo",
+ "double detach from %s unexpectedly succeeded\n", FOO))
+ goto err;
+
+ if (CHECK(bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0),
+ "prog_attach_allow_foo_none",
+ "attach prog to %s failed, errno=%d\n", FOO, errno))
+ goto err;
+
+ if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0),
+ "fail_prog_attach_allow_bar_none",
+ "attach prog to %s unexpectedly succeeded\n", BAR))
+ goto err;
+
+ if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE),
+ "fail_prog_attach_allow_bar_override",
+ "attach prog to %s unexpectedly succeeded\n", BAR))
+ goto err;
+
+ if (CHECK(!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE),
+ "fail_prog_attach_allow_foo_override",
+ "attach prog to %s unexpectedly succeeded\n", FOO))
+ goto err;
+
+ if (CHECK(bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0),
+ "prog_attach_drop_foo_none",
+ "attach prog to %s failed, errno=%d\n", FOO, errno))
+ goto err;
+
+err:
+ close(foo);
+ close(bar);
+ close(allow_prog);
+ close(drop_prog);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/core_extern.c b/tools/testing/selftests/bpf/prog_tests/core_extern.c
new file mode 100644
index 000000000000..b093787e9448
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/core_extern.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <test_progs.h>
+#include <sys/mman.h>
+#include <sys/utsname.h>
+#include <linux/version.h>
+#include "test_core_extern.skel.h"
+
+static uint32_t get_kernel_version(void)
+{
+ uint32_t major, minor, patch;
+ struct utsname info;
+
+ uname(&info);
+ if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
+ return 0;
+ return KERNEL_VERSION(major, minor, patch);
+}
+
+#define CFG "CONFIG_BPF_SYSCALL=n\n"
+
+static struct test_case {
+ const char *name;
+ const char *cfg;
+ bool fails;
+ struct test_core_extern__data data;
+} test_cases[] = {
+ { .name = "default search path", .data = { .bpf_syscall = true } },
+ {
+ .name = "custom values",
+ .cfg = "CONFIG_BPF_SYSCALL=n\n"
+ "CONFIG_TRISTATE=m\n"
+ "CONFIG_BOOL=y\n"
+ "CONFIG_CHAR=100\n"
+ "CONFIG_USHORT=30000\n"
+ "CONFIG_INT=123456\n"
+ "CONFIG_ULONG=0xDEADBEEFC0DE\n"
+ "CONFIG_STR=\"abracad\"\n"
+ "CONFIG_MISSING=0",
+ .data = {
+ .bpf_syscall = false,
+ .tristate_val = TRI_MODULE,
+ .bool_val = true,
+ .char_val = 100,
+ .ushort_val = 30000,
+ .int_val = 123456,
+ .ulong_val = 0xDEADBEEFC0DE,
+ .str_val = "abracad",
+ },
+ },
+ /* TRISTATE */
+ { .name = "tristate (y)", .cfg = CFG"CONFIG_TRISTATE=y\n",
+ .data = { .tristate_val = TRI_YES } },
+ { .name = "tristate (n)", .cfg = CFG"CONFIG_TRISTATE=n\n",
+ .data = { .tristate_val = TRI_NO } },
+ { .name = "tristate (m)", .cfg = CFG"CONFIG_TRISTATE=m\n",
+ .data = { .tristate_val = TRI_MODULE } },
+ { .name = "tristate (int)", .fails = 1, .cfg = CFG"CONFIG_TRISTATE=1" },
+ { .name = "tristate (bad)", .fails = 1, .cfg = CFG"CONFIG_TRISTATE=M" },
+ /* BOOL */
+ { .name = "bool (y)", .cfg = CFG"CONFIG_BOOL=y\n",
+ .data = { .bool_val = true } },
+ { .name = "bool (n)", .cfg = CFG"CONFIG_BOOL=n\n",
+ .data = { .bool_val = false } },
+ { .name = "bool (tristate)", .fails = 1, .cfg = CFG"CONFIG_BOOL=m" },
+ { .name = "bool (int)", .fails = 1, .cfg = CFG"CONFIG_BOOL=1" },
+ /* CHAR */
+ { .name = "char (tristate)", .cfg = CFG"CONFIG_CHAR=m\n",
+ .data = { .char_val = 'm' } },
+ { .name = "char (bad)", .fails = 1, .cfg = CFG"CONFIG_CHAR=q\n" },
+ { .name = "char (empty)", .fails = 1, .cfg = CFG"CONFIG_CHAR=\n" },
+ { .name = "char (str)", .fails = 1, .cfg = CFG"CONFIG_CHAR=\"y\"\n" },
+ /* STRING */
+ { .name = "str (empty)", .cfg = CFG"CONFIG_STR=\"\"\n",
+ .data = { .str_val = "\0\0\0\0\0\0\0" } },
+ { .name = "str (padded)", .cfg = CFG"CONFIG_STR=\"abra\"\n",
+ .data = { .str_val = "abra\0\0\0" } },
+ { .name = "str (too long)", .cfg = CFG"CONFIG_STR=\"abracada\"\n",
+ .data = { .str_val = "abracad" } },
+ { .name = "str (no value)", .fails = 1, .cfg = CFG"CONFIG_STR=\n" },
+ { .name = "str (bad value)", .fails = 1, .cfg = CFG"CONFIG_STR=bla\n" },
+ /* INTEGERS */
+ {
+ .name = "integer forms",
+ .cfg = CFG
+ "CONFIG_CHAR=0xA\n"
+ "CONFIG_USHORT=0462\n"
+ "CONFIG_INT=-100\n"
+ "CONFIG_ULONG=+1000000000000",
+ .data = {
+ .char_val = 0xA,
+ .ushort_val = 0462,
+ .int_val = -100,
+ .ulong_val = 1000000000000,
+ },
+ },
+ { .name = "int (bad)", .fails = 1, .cfg = CFG"CONFIG_INT=abc" },
+ { .name = "int (str)", .fails = 1, .cfg = CFG"CONFIG_INT=\"abc\"" },
+ { .name = "int (empty)", .fails = 1, .cfg = CFG"CONFIG_INT=" },
+ { .name = "int (mixed)", .fails = 1, .cfg = CFG"CONFIG_INT=123abc" },
+ { .name = "int (max)", .cfg = CFG"CONFIG_INT=2147483647",
+ .data = { .int_val = 2147483647 } },
+ { .name = "int (min)", .cfg = CFG"CONFIG_INT=-2147483648",
+ .data = { .int_val = -2147483648 } },
+ { .name = "int (max+1)", .fails = 1, .cfg = CFG"CONFIG_INT=2147483648" },
+ { .name = "int (min-1)", .fails = 1, .cfg = CFG"CONFIG_INT=-2147483649" },
+ { .name = "ushort (max)", .cfg = CFG"CONFIG_USHORT=65535",
+ .data = { .ushort_val = 65535 } },
+ { .name = "ushort (min)", .cfg = CFG"CONFIG_USHORT=0",
+ .data = { .ushort_val = 0 } },
+ { .name = "ushort (max+1)", .fails = 1, .cfg = CFG"CONFIG_USHORT=65536" },
+ { .name = "ushort (min-1)", .fails = 1, .cfg = CFG"CONFIG_USHORT=-1" },
+ { .name = "u64 (max)", .cfg = CFG"CONFIG_ULONG=0xffffffffffffffff",
+ .data = { .ulong_val = 0xffffffffffffffff } },
+ { .name = "u64 (min)", .cfg = CFG"CONFIG_ULONG=0",
+ .data = { .ulong_val = 0 } },
+ { .name = "u64 (max+1)", .fails = 1, .cfg = CFG"CONFIG_ULONG=0x10000000000000000" },
+};
+
+void test_core_extern(void)
+{
+ const uint32_t kern_ver = get_kernel_version();
+ int err, duration = 0, i, j;
+ struct test_core_extern *skel = NULL;
+ uint64_t *got, *exp;
+ int n = sizeof(*skel->data) / sizeof(uint64_t);
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ struct test_case *t = &test_cases[i];
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+ .kconfig = t->cfg,
+ );
+
+ if (!test__start_subtest(t->name))
+ continue;
+
+ skel = test_core_extern__open_opts(&opts);
+ if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
+ goto cleanup;
+ err = test_core_extern__load(skel);
+ if (t->fails) {
+ CHECK(!err, "skel_load",
+ "shouldn't succeed open/load of skeleton\n");
+ goto cleanup;
+ } else if (CHECK(err, "skel_load",
+ "failed to open/load skeleton\n")) {
+ goto cleanup;
+ }
+ err = test_core_extern__attach(skel);
+ if (CHECK(err, "attach_raw_tp", "failed attach: %d\n", err))
+ goto cleanup;
+
+ usleep(1);
+
+ t->data.kern_ver = kern_ver;
+ t->data.missing_val = 0xDEADC0DE;
+ got = (uint64_t *)skel->data;
+ exp = (uint64_t *)&t->data;
+ for (j = 0; j < n; j++) {
+ CHECK(got[j] != exp[j], "check_res",
+ "result #%d: expected %lx, but got %lx\n",
+ j, exp[j], got[j]);
+ }
+cleanup:
+ test_core_extern__destroy(skel);
+ skel = NULL;
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index 05fe85281ff7..31e177adbdf1 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -74,6 +74,7 @@
.b123 = 2, \
.c1c = 3, \
.d00d = 4, \
+ .f10c = 0, \
}, \
.output_len = sizeof(struct core_reloc_arrays_output) \
}
@@ -308,12 +309,15 @@ static struct core_reloc_test_case test_cases[] = {
ARRAYS_CASE(arrays),
ARRAYS_CASE(arrays___diff_arr_dim),
ARRAYS_CASE(arrays___diff_arr_val_sz),
+ ARRAYS_CASE(arrays___equiv_zero_sz_arr),
+ ARRAYS_CASE(arrays___fixed_arr),
ARRAYS_ERR_CASE(arrays___err_too_small),
ARRAYS_ERR_CASE(arrays___err_too_shallow),
ARRAYS_ERR_CASE(arrays___err_non_array),
ARRAYS_ERR_CASE(arrays___err_wrong_val_type1),
ARRAYS_ERR_CASE(arrays___err_wrong_val_type2),
+ ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr),
/* enum/ptr/int handling scenarios */
PRIMITIVES_CASE(primitives),
diff --git a/tools/testing/selftests/bpf/prog_tests/cpu_mask.c b/tools/testing/selftests/bpf/prog_tests/cpu_mask.c
new file mode 100644
index 000000000000..1fa1bdbaffa9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cpu_mask.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "libbpf_internal.h"
+
+static int duration = 0;
+
+static void validate_mask(int case_nr, const char *exp, bool *mask, int n)
+{
+ int i;
+
+ for (i = 0; exp[i]; i++) {
+ if (exp[i] == '1') {
+ if (CHECK(i + 1 > n, "mask_short",
+ "case #%d: mask too short, got n=%d, need at least %d\n",
+ case_nr, n, i + 1))
+ return;
+ CHECK(!mask[i], "cpu_not_set",
+ "case #%d: mask differs, expected cpu#%d SET\n",
+ case_nr, i);
+ } else {
+ CHECK(i < n && mask[i], "cpu_set",
+ "case #%d: mask differs, expected cpu#%d UNSET\n",
+ case_nr, i);
+ }
+ }
+ CHECK(i < n, "mask_long",
+ "case #%d: mask too long, got n=%d, expected at most %d\n",
+ case_nr, n, i);
+}
+
+static struct {
+ const char *cpu_mask;
+ const char *expect;
+ bool fails;
+} test_cases[] = {
+ { "0\n", "1", false },
+ { "0,2\n", "101", false },
+ { "0-2\n", "111", false },
+ { "0-2,3-4\n", "11111", false },
+ { "0", "1", false },
+ { "0-2", "111", false },
+ { "0,2", "101", false },
+ { "0,1-3", "1111", false },
+ { "0,1,2,3", "1111", false },
+ { "0,2-3,5", "101101", false },
+ { "3-3", "0001", false },
+ { "2-4,6,9-10", "00111010011", false },
+ /* failure cases */
+ { "", "", true },
+ { "0-", "", true },
+ { "0 ", "", true },
+ { "0_1", "", true },
+ { "1-0", "", true },
+ { "-1", "", true },
+};
+
+void test_cpu_mask()
+{
+ int i, err, n;
+ bool *mask;
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ mask = NULL;
+ err = parse_cpu_mask_str(test_cases[i].cpu_mask, &mask, &n);
+ if (test_cases[i].fails) {
+ CHECK(!err, "should_fail",
+ "case #%d: parsing should fail!\n", i + 1);
+ } else {
+ if (CHECK(err, "parse_err",
+ "case #%d: cpu mask parsing failed: %d\n",
+ i + 1, err))
+ continue;
+ validate_mask(i + 1, test_cases[i].expect, mask, n);
+ }
+ free(mask);
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
index 40bcff2cc274..235ac4f67f5b 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
@@ -1,90 +1,55 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
+#include "test_pkt_access.skel.h"
+#include "fentry_test.skel.h"
+#include "fexit_test.skel.h"
void test_fentry_fexit(void)
{
- struct bpf_prog_load_attr attr_fentry = {
- .file = "./fentry_test.o",
- };
- struct bpf_prog_load_attr attr_fexit = {
- .file = "./fexit_test.o",
- };
-
- struct bpf_object *obj_fentry = NULL, *obj_fexit = NULL, *pkt_obj;
- struct bpf_map *data_map_fentry, *data_map_fexit;
- char fentry_name[] = "fentry/bpf_fentry_testX";
- char fexit_name[] = "fexit/bpf_fentry_testX";
- int err, pkt_fd, kfree_skb_fd, i;
- struct bpf_link *link[12] = {};
- struct bpf_program *prog[12];
- __u32 duration, retval;
- const int zero = 0;
- u64 result[12];
-
- err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
- &pkt_obj, &pkt_fd);
- if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
+ struct test_pkt_access *pkt_skel = NULL;
+ struct fentry_test *fentry_skel = NULL;
+ struct fexit_test *fexit_skel = NULL;
+ __u64 *fentry_res, *fexit_res;
+ __u32 duration = 0, retval;
+ int err, pkt_fd, i;
+
+ pkt_skel = test_pkt_access__open_and_load();
+ if (CHECK(!pkt_skel, "pkt_skel_load", "pkt_access skeleton failed\n"))
return;
- err = bpf_prog_load_xattr(&attr_fentry, &obj_fentry, &kfree_skb_fd);
- if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
+ fentry_skel = fentry_test__open_and_load();
+ if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n"))
goto close_prog;
- err = bpf_prog_load_xattr(&attr_fexit, &obj_fexit, &kfree_skb_fd);
- if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
+ fexit_skel = fexit_test__open_and_load();
+ if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n"))
goto close_prog;
- for (i = 0; i < 6; i++) {
- fentry_name[sizeof(fentry_name) - 2] = '1' + i;
- prog[i] = bpf_object__find_program_by_title(obj_fentry, fentry_name);
- if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fentry_name))
- goto close_prog;
- link[i] = bpf_program__attach_trace(prog[i]);
- if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
- goto close_prog;
- }
- data_map_fentry = bpf_object__find_map_by_name(obj_fentry, "fentry_t.bss");
- if (CHECK(!data_map_fentry, "find_data_map", "data map not found\n"))
+ err = fentry_test__attach(fentry_skel);
+ if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err))
goto close_prog;
-
- for (i = 6; i < 12; i++) {
- fexit_name[sizeof(fexit_name) - 2] = '1' + i - 6;
- prog[i] = bpf_object__find_program_by_title(obj_fexit, fexit_name);
- if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fexit_name))
- goto close_prog;
- link[i] = bpf_program__attach_trace(prog[i]);
- if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
- goto close_prog;
- }
- data_map_fexit = bpf_object__find_map_by_name(obj_fexit, "fexit_te.bss");
- if (CHECK(!data_map_fexit, "find_data_map", "data map not found\n"))
+ err = fexit_test__attach(fexit_skel);
+ if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
goto close_prog;
+ pkt_fd = bpf_program__fd(pkt_skel->progs.test_pkt_access);
err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
NULL, NULL, &retval, &duration);
CHECK(err || retval, "ipv6",
"err %d errno %d retval %d duration %d\n",
err, errno, retval, duration);
- err = bpf_map_lookup_elem(bpf_map__fd(data_map_fentry), &zero, &result);
- if (CHECK(err, "get_result",
- "failed to get output data: %d\n", err))
- goto close_prog;
-
- err = bpf_map_lookup_elem(bpf_map__fd(data_map_fexit), &zero, result + 6);
- if (CHECK(err, "get_result",
- "failed to get output data: %d\n", err))
- goto close_prog;
-
- for (i = 0; i < 12; i++)
- if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n",
- i % 6 + 1, result[i]))
- goto close_prog;
+ fentry_res = (__u64 *)fentry_skel->bss;
+ fexit_res = (__u64 *)fexit_skel->bss;
+ printf("%lld\n", fentry_skel->bss->test1_result);
+ for (i = 0; i < 6; i++) {
+ CHECK(fentry_res[i] != 1, "result",
+ "fentry_test%d failed err %lld\n", i + 1, fentry_res[i]);
+ CHECK(fexit_res[i] != 1, "result",
+ "fexit_test%d failed err %lld\n", i + 1, fexit_res[i]);
+ }
close_prog:
- for (i = 0; i < 12; i++)
- if (!IS_ERR_OR_NULL(link[i]))
- bpf_link__destroy(link[i]);
- bpf_object__close(obj_fentry);
- bpf_object__close(obj_fexit);
- bpf_object__close(pkt_obj);
+ test_pkt_access__destroy(pkt_skel);
+ fentry_test__destroy(fentry_skel);
+ fexit_test__destroy(fexit_skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
index 9fb103193878..e1a379f5f7d2 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
@@ -1,64 +1,43 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
+#include "test_pkt_access.skel.h"
+#include "fentry_test.skel.h"
void test_fentry_test(void)
{
- struct bpf_prog_load_attr attr = {
- .file = "./fentry_test.o",
- };
-
- char prog_name[] = "fentry/bpf_fentry_testX";
- struct bpf_object *obj = NULL, *pkt_obj;
- int err, pkt_fd, kfree_skb_fd, i;
- struct bpf_link *link[6] = {};
- struct bpf_program *prog[6];
+ struct test_pkt_access *pkt_skel = NULL;
+ struct fentry_test *fentry_skel = NULL;
+ int err, pkt_fd, i;
__u32 duration, retval;
- struct bpf_map *data_map;
- const int zero = 0;
- u64 result[6];
+ __u64 *result;
- err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
- &pkt_obj, &pkt_fd);
- if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
+ pkt_skel = test_pkt_access__open_and_load();
+ if (CHECK(!pkt_skel, "pkt_skel_load", "pkt_access skeleton failed\n"))
return;
- err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd);
- if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
- goto close_prog;
+ fentry_skel = fentry_test__open_and_load();
+ if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n"))
+ goto cleanup;
- for (i = 0; i < 6; i++) {
- prog_name[sizeof(prog_name) - 2] = '1' + i;
- prog[i] = bpf_object__find_program_by_title(obj, prog_name);
- if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name))
- goto close_prog;
- link[i] = bpf_program__attach_trace(prog[i]);
- if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
- goto close_prog;
- }
- data_map = bpf_object__find_map_by_name(obj, "fentry_t.bss");
- if (CHECK(!data_map, "find_data_map", "data map not found\n"))
- goto close_prog;
+ err = fentry_test__attach(fentry_skel);
+ if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err))
+ goto cleanup;
+ pkt_fd = bpf_program__fd(pkt_skel->progs.test_pkt_access);
err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
NULL, NULL, &retval, &duration);
CHECK(err || retval, "ipv6",
"err %d errno %d retval %d duration %d\n",
err, errno, retval, duration);
- err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result);
- if (CHECK(err, "get_result",
- "failed to get output data: %d\n", err))
- goto close_prog;
-
- for (i = 0; i < 6; i++)
- if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n",
- i + 1, result[i]))
- goto close_prog;
+ result = (__u64 *)fentry_skel->bss;
+ for (i = 0; i < 6; i++) {
+ if (CHECK(result[i] != 1, "result",
+ "fentry_test%d failed err %lld\n", i + 1, result[i]))
+ goto cleanup;
+ }
-close_prog:
- for (i = 0; i < 6; i++)
- if (!IS_ERR_OR_NULL(link[i]))
- bpf_link__destroy(link[i]);
- bpf_object__close(obj);
- bpf_object__close(pkt_obj);
+cleanup:
+ fentry_test__destroy(fentry_skel);
+ test_pkt_access__destroy(pkt_skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c
index 051a6d48762c..16a814eb4d64 100644
--- a/tools/testing/selftests/bpf/prog_tests/mmap.c
+++ b/tools/testing/selftests/bpf/prog_tests/mmap.c
@@ -1,16 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <sys/mman.h>
+#include "test_mmap.skel.h"
struct map_data {
__u64 val[512 * 4];
};
-struct bss_data {
- __u64 in_val;
- __u64 out_val;
-};
-
static size_t roundup_page(size_t sz)
{
long page_size = sysconf(_SC_PAGE_SIZE);
@@ -19,41 +15,25 @@ static size_t roundup_page(size_t sz)
void test_mmap(void)
{
- const char *file = "test_mmap.o";
- const char *probe_name = "raw_tracepoint/sys_enter";
- const char *tp_name = "sys_enter";
- const size_t bss_sz = roundup_page(sizeof(struct bss_data));
+ const size_t bss_sz = roundup_page(sizeof(struct test_mmap__bss));
const size_t map_sz = roundup_page(sizeof(struct map_data));
const int zero = 0, one = 1, two = 2, far = 1500;
const long page_size = sysconf(_SC_PAGE_SIZE);
int err, duration = 0, i, data_map_fd;
- struct bpf_program *prog;
- struct bpf_object *obj;
- struct bpf_link *link = NULL;
struct bpf_map *data_map, *bss_map;
void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
- volatile struct bss_data *bss_data;
- volatile struct map_data *map_data;
+ struct test_mmap__bss *bss_data;
+ struct map_data *map_data;
+ struct test_mmap *skel;
__u64 val = 0;
- obj = bpf_object__open_file("test_mmap.o", NULL);
- if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n",
- file, PTR_ERR(obj)))
+
+ skel = test_mmap__open_and_load();
+ if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
return;
- prog = bpf_object__find_program_by_title(obj, probe_name);
- if (CHECK(!prog, "find_probe", "prog '%s' not found\n", probe_name))
- goto cleanup;
- err = bpf_object__load(obj);
- if (CHECK(err, "obj_load", "failed to load prog '%s': %d\n",
- probe_name, err))
- goto cleanup;
- bss_map = bpf_object__find_map_by_name(obj, "test_mma.bss");
- if (CHECK(!bss_map, "find_bss_map", ".bss map not found\n"))
- goto cleanup;
- data_map = bpf_object__find_map_by_name(obj, "data_map");
- if (CHECK(!data_map, "find_data_map", "data_map map not found\n"))
- goto cleanup;
+ bss_map = skel->maps.bss;
+ data_map = skel->maps.data_map;
data_map_fd = bpf_map__fd(data_map);
bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
@@ -77,13 +57,15 @@ void test_mmap(void)
CHECK_FAIL(bss_data->in_val);
CHECK_FAIL(bss_data->out_val);
+ CHECK_FAIL(skel->bss->in_val);
+ CHECK_FAIL(skel->bss->out_val);
CHECK_FAIL(map_data->val[0]);
CHECK_FAIL(map_data->val[1]);
CHECK_FAIL(map_data->val[2]);
CHECK_FAIL(map_data->val[far]);
- link = bpf_program__attach_raw_tracepoint(prog, tp_name);
- if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
+ err = test_mmap__attach(skel);
+ if (CHECK(err, "attach_raw_tp", "err %d\n", err))
goto cleanup;
bss_data->in_val = 123;
@@ -94,6 +76,8 @@ void test_mmap(void)
CHECK_FAIL(bss_data->in_val != 123);
CHECK_FAIL(bss_data->out_val != 123);
+ CHECK_FAIL(skel->bss->in_val != 123);
+ CHECK_FAIL(skel->bss->out_val != 123);
CHECK_FAIL(map_data->val[0] != 111);
CHECK_FAIL(map_data->val[1] != 222);
CHECK_FAIL(map_data->val[2] != 123);
@@ -160,6 +144,8 @@ void test_mmap(void)
usleep(1);
CHECK_FAIL(bss_data->in_val != 321);
CHECK_FAIL(bss_data->out_val != 321);
+ CHECK_FAIL(skel->bss->in_val != 321);
+ CHECK_FAIL(skel->bss->out_val != 321);
CHECK_FAIL(map_data->val[0] != 111);
CHECK_FAIL(map_data->val[1] != 222);
CHECK_FAIL(map_data->val[2] != 321);
@@ -203,6 +189,8 @@ void test_mmap(void)
map_data = tmp2;
CHECK_FAIL(bss_data->in_val != 321);
CHECK_FAIL(bss_data->out_val != 321);
+ CHECK_FAIL(skel->bss->in_val != 321);
+ CHECK_FAIL(skel->bss->out_val != 321);
CHECK_FAIL(map_data->val[0] != 111);
CHECK_FAIL(map_data->val[1] != 222);
CHECK_FAIL(map_data->val[2] != 321);
@@ -214,7 +202,5 @@ cleanup:
CHECK_FAIL(munmap(bss_mmaped, bss_sz));
if (map_mmaped)
CHECK_FAIL(munmap(map_mmaped, map_sz));
- if (!IS_ERR_OR_NULL(link))
- bpf_link__destroy(link);
- bpf_object__close(obj);
+ test_mmap__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
index 3003fddc0613..cf6c87936c69 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
@@ -4,6 +4,7 @@
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>
+#include "libbpf_internal.h"
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
@@ -19,7 +20,7 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size)
void test_perf_buffer(void)
{
- int err, prog_fd, nr_cpus, i, duration = 0;
+ int err, prog_fd, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0;
const char *prog_name = "kprobe/sys_nanosleep";
const char *file = "./test_perf_buffer.o";
struct perf_buffer_opts pb_opts = {};
@@ -29,15 +30,27 @@ void test_perf_buffer(void)
struct bpf_object *obj;
struct perf_buffer *pb;
struct bpf_link *link;
+ bool *online;
nr_cpus = libbpf_num_possible_cpus();
if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus))
return;
+ err = parse_cpu_mask_file("/sys/devices/system/cpu/online",
+ &online, &on_len);
+ if (CHECK(err, "nr_on_cpus", "err %d\n", err))
+ return;
+
+ for (i = 0; i < on_len; i++)
+ if (online[i])
+ nr_on_cpus++;
+
/* load program */
err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
- if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
- return;
+ if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) {
+ obj = NULL;
+ goto out_close;
+ }
prog = bpf_object__find_program_by_title(obj, prog_name);
if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
@@ -64,6 +77,11 @@ void test_perf_buffer(void)
/* trigger kprobe on every CPU */
CPU_ZERO(&cpu_seen);
for (i = 0; i < nr_cpus; i++) {
+ if (i >= on_len || !online[i]) {
+ printf("skipping offline CPU #%d\n", i);
+ continue;
+ }
+
CPU_ZERO(&cpu_set);
CPU_SET(i, &cpu_set);
@@ -81,8 +99,8 @@ void test_perf_buffer(void)
if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
goto out_free_pb;
- if (CHECK(CPU_COUNT(&cpu_seen) != nr_cpus, "seen_cpu_cnt",
- "expect %d, seen %d\n", nr_cpus, CPU_COUNT(&cpu_seen)))
+ if (CHECK(CPU_COUNT(&cpu_seen) != nr_on_cpus, "seen_cpu_cnt",
+ "expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen)))
goto out_free_pb;
out_free_pb:
@@ -91,4 +109,5 @@ out_detach:
bpf_link__destroy(link);
out_close:
bpf_object__close(obj);
+ free(online);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c
index 8a3187dec048..7aecfd9e87d1 100644
--- a/tools/testing/selftests/bpf/prog_tests/probe_user.c
+++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c
@@ -3,8 +3,7 @@
void test_probe_user(void)
{
-#define kprobe_name "__sys_connect"
- const char *prog_name = "kprobe/" kprobe_name;
+ const char *prog_name = "kprobe/__sys_connect";
const char *obj_file = "./test_probe_user.o";
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, );
int err, results_map_fd, sock_fd, duration = 0;
@@ -33,8 +32,7 @@ void test_probe_user(void)
"err %d\n", results_map_fd))
goto cleanup;
- kprobe_link = bpf_program__attach_kprobe(kprobe_prog, false,
- kprobe_name);
+ kprobe_link = bpf_program__attach(kprobe_prog);
if (CHECK(IS_ERR(kprobe_link), "attach_kprobe",
"err %ld\n", PTR_ERR(kprobe_link))) {
kprobe_link = NULL;
diff --git a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
index d90acc13d1ec..563e12120e77 100644
--- a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
+++ b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
@@ -16,14 +16,11 @@ struct rdonly_map_subtest {
void test_rdonly_maps(void)
{
- const char *prog_name_skip_loop = "raw_tracepoint/sys_enter:skip_loop";
- const char *prog_name_part_loop = "raw_tracepoint/sys_enter:part_loop";
- const char *prog_name_full_loop = "raw_tracepoint/sys_enter:full_loop";
const char *file = "test_rdonly_maps.o";
struct rdonly_map_subtest subtests[] = {
- { "skip loop", prog_name_skip_loop, 0, 0 },
- { "part loop", prog_name_part_loop, 3, 2 + 3 + 4 },
- { "full loop", prog_name_full_loop, 4, 2 + 3 + 4 + 5 },
+ { "skip loop", "skip_loop", 0, 0 },
+ { "part loop", "part_loop", 3, 2 + 3 + 4 },
+ { "full loop", "full_loop", 4, 2 + 3 + 4 + 5 },
};
int i, err, zero = 0, duration = 0;
struct bpf_link *link = NULL;
@@ -50,7 +47,7 @@ void test_rdonly_maps(void)
if (!test__start_subtest(t->subtest_name))
continue;
- prog = bpf_object__find_program_by_title(obj, t->prog_name);
+ prog = bpf_object__find_program_by_name(obj, t->prog_name);
if (CHECK(!prog, "find_prog", "prog '%s' not found\n",
t->prog_name))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/test_select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
index 7566c13eb51a..2c37ae7dc214 100644
--- a/tools/testing/selftests/bpf/test_select_reuseport.c
+++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
@@ -20,8 +20,11 @@
#include <bpf/libbpf.h>
#include "bpf_rlimit.h"
#include "bpf_util.h"
+
+#include "test_progs.h"
#include "test_select_reuseport_common.h"
+#define MAX_TEST_NAME 80
#define MIN_TCPHDR_LEN 20
#define UDPHDR_LEN 8
@@ -32,11 +35,11 @@
static int result_map, tmp_index_ovr_map, linum_map, data_check_map;
static enum result expected_results[NR_RESULTS];
static int sk_fds[REUSEPORT_ARRAY_SIZE];
-static int reuseport_array, outer_map;
+static int reuseport_array = -1, outer_map = -1;
static int select_by_skb_data_prog;
-static int saved_tcp_syncookie;
+static int saved_tcp_syncookie = -1;
static struct bpf_object *obj;
-static int saved_tcp_fo;
+static int saved_tcp_fo = -1;
static __u32 index_zero;
static int epfd;
@@ -46,16 +49,21 @@ static union sa46 {
sa_family_t family;
} srv_sa;
-#define CHECK(condition, tag, format...) ({ \
- int __ret = !!(condition); \
- if (__ret) { \
- printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \
- printf(format); \
- exit(-1); \
+#define RET_IF(condition, tag, format...) ({ \
+ if (CHECK_FAIL(condition)) { \
+ printf(tag " " format); \
+ return; \
+ } \
+})
+
+#define RET_ERR(condition, tag, format...) ({ \
+ if (CHECK_FAIL(condition)) { \
+ printf(tag " " format); \
+ return -1; \
} \
})
-static void create_maps(void)
+static int create_maps(void)
{
struct bpf_create_map_attr attr = {};
@@ -67,8 +75,8 @@ static void create_maps(void)
attr.max_entries = REUSEPORT_ARRAY_SIZE;
reuseport_array = bpf_create_map_xattr(&attr);
- CHECK(reuseport_array == -1, "creating reuseport_array",
- "reuseport_array:%d errno:%d\n", reuseport_array, errno);
+ RET_ERR(reuseport_array == -1, "creating reuseport_array",
+ "reuseport_array:%d errno:%d\n", reuseport_array, errno);
/* Creating outer_map */
attr.name = "outer_map";
@@ -78,63 +86,61 @@ static void create_maps(void)
attr.max_entries = 1;
attr.inner_map_fd = reuseport_array;
outer_map = bpf_create_map_xattr(&attr);
- CHECK(outer_map == -1, "creating outer_map",
- "outer_map:%d errno:%d\n", outer_map, errno);
+ RET_ERR(outer_map == -1, "creating outer_map",
+ "outer_map:%d errno:%d\n", outer_map, errno);
+
+ return 0;
}
-static void prepare_bpf_obj(void)
+static int prepare_bpf_obj(void)
{
struct bpf_program *prog;
struct bpf_map *map;
int err;
- struct bpf_object_open_attr attr = {
- .file = "test_select_reuseport_kern.o",
- .prog_type = BPF_PROG_TYPE_SK_REUSEPORT,
- };
- obj = bpf_object__open_xattr(&attr);
- CHECK(IS_ERR_OR_NULL(obj), "open test_select_reuseport_kern.o",
- "obj:%p PTR_ERR(obj):%ld\n", obj, PTR_ERR(obj));
-
- prog = bpf_program__next(NULL, obj);
- CHECK(!prog, "get first bpf_program", "!prog\n");
- bpf_program__set_type(prog, attr.prog_type);
+ obj = bpf_object__open("test_select_reuseport_kern.o");
+ RET_ERR(IS_ERR_OR_NULL(obj), "open test_select_reuseport_kern.o",
+ "obj:%p PTR_ERR(obj):%ld\n", obj, PTR_ERR(obj));
map = bpf_object__find_map_by_name(obj, "outer_map");
- CHECK(!map, "find outer_map", "!map\n");
+ RET_ERR(!map, "find outer_map", "!map\n");
err = bpf_map__reuse_fd(map, outer_map);
- CHECK(err, "reuse outer_map", "err:%d\n", err);
+ RET_ERR(err, "reuse outer_map", "err:%d\n", err);
err = bpf_object__load(obj);
- CHECK(err, "load bpf_object", "err:%d\n", err);
+ RET_ERR(err, "load bpf_object", "err:%d\n", err);
+ prog = bpf_program__next(NULL, obj);
+ RET_ERR(!prog, "get first bpf_program", "!prog\n");
select_by_skb_data_prog = bpf_program__fd(prog);
- CHECK(select_by_skb_data_prog == -1, "get prog fd",
- "select_by_skb_data_prog:%d\n", select_by_skb_data_prog);
+ RET_ERR(select_by_skb_data_prog == -1, "get prog fd",
+ "select_by_skb_data_prog:%d\n", select_by_skb_data_prog);
map = bpf_object__find_map_by_name(obj, "result_map");
- CHECK(!map, "find result_map", "!map\n");
+ RET_ERR(!map, "find result_map", "!map\n");
result_map = bpf_map__fd(map);
- CHECK(result_map == -1, "get result_map fd",
- "result_map:%d\n", result_map);
+ RET_ERR(result_map == -1, "get result_map fd",
+ "result_map:%d\n", result_map);
map = bpf_object__find_map_by_name(obj, "tmp_index_ovr_map");
- CHECK(!map, "find tmp_index_ovr_map", "!map\n");
+ RET_ERR(!map, "find tmp_index_ovr_map\n", "!map");
tmp_index_ovr_map = bpf_map__fd(map);
- CHECK(tmp_index_ovr_map == -1, "get tmp_index_ovr_map fd",
- "tmp_index_ovr_map:%d\n", tmp_index_ovr_map);
+ RET_ERR(tmp_index_ovr_map == -1, "get tmp_index_ovr_map fd",
+ "tmp_index_ovr_map:%d\n", tmp_index_ovr_map);
map = bpf_object__find_map_by_name(obj, "linum_map");
- CHECK(!map, "find linum_map", "!map\n");
+ RET_ERR(!map, "find linum_map", "!map\n");
linum_map = bpf_map__fd(map);
- CHECK(linum_map == -1, "get linum_map fd",
- "linum_map:%d\n", linum_map);
+ RET_ERR(linum_map == -1, "get linum_map fd",
+ "linum_map:%d\n", linum_map);
map = bpf_object__find_map_by_name(obj, "data_check_map");
- CHECK(!map, "find data_check_map", "!map\n");
+ RET_ERR(!map, "find data_check_map", "!map\n");
data_check_map = bpf_map__fd(map);
- CHECK(data_check_map == -1, "get data_check_map fd",
- "data_check_map:%d\n", data_check_map);
+ RET_ERR(data_check_map == -1, "get data_check_map fd",
+ "data_check_map:%d\n", data_check_map);
+
+ return 0;
}
static void sa46_init_loopback(union sa46 *sa, sa_family_t family)
@@ -163,65 +169,73 @@ static int read_int_sysctl(const char *sysctl)
int fd, ret;
fd = open(sysctl, 0);
- CHECK(fd == -1, "open(sysctl)", "sysctl:%s fd:%d errno:%d\n",
- sysctl, fd, errno);
+ RET_ERR(fd == -1, "open(sysctl)",
+ "sysctl:%s fd:%d errno:%d\n", sysctl, fd, errno);
ret = read(fd, buf, sizeof(buf));
- CHECK(ret <= 0, "read(sysctl)", "sysctl:%s ret:%d errno:%d\n",
- sysctl, ret, errno);
- close(fd);
+ RET_ERR(ret <= 0, "read(sysctl)",
+ "sysctl:%s ret:%d errno:%d\n", sysctl, ret, errno);
+ close(fd);
return atoi(buf);
}
-static void write_int_sysctl(const char *sysctl, int v)
+static int write_int_sysctl(const char *sysctl, int v)
{
int fd, ret, size;
char buf[16];
fd = open(sysctl, O_RDWR);
- CHECK(fd == -1, "open(sysctl)", "sysctl:%s fd:%d errno:%d\n",
- sysctl, fd, errno);
+ RET_ERR(fd == -1, "open(sysctl)",
+ "sysctl:%s fd:%d errno:%d\n", sysctl, fd, errno);
size = snprintf(buf, sizeof(buf), "%d", v);
ret = write(fd, buf, size);
- CHECK(ret != size, "write(sysctl)",
- "sysctl:%s ret:%d size:%d errno:%d\n", sysctl, ret, size, errno);
+ RET_ERR(ret != size, "write(sysctl)",
+ "sysctl:%s ret:%d size:%d errno:%d\n",
+ sysctl, ret, size, errno);
+
close(fd);
+ return 0;
}
static void restore_sysctls(void)
{
- write_int_sysctl(TCP_FO_SYSCTL, saved_tcp_fo);
- write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, saved_tcp_syncookie);
+ if (saved_tcp_fo != -1)
+ write_int_sysctl(TCP_FO_SYSCTL, saved_tcp_fo);
+ if (saved_tcp_syncookie != -1)
+ write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, saved_tcp_syncookie);
}
-static void enable_fastopen(void)
+static int enable_fastopen(void)
{
int fo;
fo = read_int_sysctl(TCP_FO_SYSCTL);
- write_int_sysctl(TCP_FO_SYSCTL, fo | 7);
+ if (fo < 0)
+ return -1;
+
+ return write_int_sysctl(TCP_FO_SYSCTL, fo | 7);
}
-static void enable_syncookie(void)
+static int enable_syncookie(void)
{
- write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 2);
+ return write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 2);
}
-static void disable_syncookie(void)
+static int disable_syncookie(void)
{
- write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 0);
+ return write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 0);
}
-static __u32 get_linum(void)
+static long get_linum(void)
{
__u32 linum;
int err;
err = bpf_map_lookup_elem(linum_map, &index_zero, &linum);
- CHECK(err == -1, "lookup_elem(linum_map)", "err:%d errno:%d\n",
- err, errno);
+ RET_ERR(err == -1, "lookup_elem(linum_map)", "err:%d errno:%d\n",
+ err, errno);
return linum;
}
@@ -237,12 +251,12 @@ static void check_data(int type, sa_family_t family, const struct cmd *cmd,
addrlen = sizeof(cli_sa);
err = getsockname(cli_fd, (struct sockaddr *)&cli_sa,
&addrlen);
- CHECK(err == -1, "getsockname(cli_fd)", "err:%d errno:%d\n",
- err, errno);
+ RET_IF(err == -1, "getsockname(cli_fd)", "err:%d errno:%d\n",
+ err, errno);
err = bpf_map_lookup_elem(data_check_map, &index_zero, &result);
- CHECK(err == -1, "lookup_elem(data_check_map)", "err:%d errno:%d\n",
- err, errno);
+ RET_IF(err == -1, "lookup_elem(data_check_map)", "err:%d errno:%d\n",
+ err, errno);
if (type == SOCK_STREAM) {
expected.len = MIN_TCPHDR_LEN;
@@ -284,22 +298,22 @@ static void check_data(int type, sa_family_t family, const struct cmd *cmd,
printf("expected: (0x%x, %u, %u)\n",
expected.eth_protocol, expected.ip_protocol,
expected.bind_inany);
- CHECK(1, "data_check result != expected",
- "bpf_prog_linum:%u\n", get_linum());
+ RET_IF(1, "data_check result != expected",
+ "bpf_prog_linum:%ld\n", get_linum());
}
- CHECK(!result.hash, "data_check result.hash empty",
- "result.hash:%u", result.hash);
+ RET_IF(!result.hash, "data_check result.hash empty",
+ "result.hash:%u", result.hash);
expected.len += cmd ? sizeof(*cmd) : 0;
if (type == SOCK_STREAM)
- CHECK(expected.len > result.len, "expected.len > result.len",
- "expected.len:%u result.len:%u bpf_prog_linum:%u\n",
- expected.len, result.len, get_linum());
+ RET_IF(expected.len > result.len, "expected.len > result.len",
+ "expected.len:%u result.len:%u bpf_prog_linum:%ld\n",
+ expected.len, result.len, get_linum());
else
- CHECK(expected.len != result.len, "expected.len != result.len",
- "expected.len:%u result.len:%u bpf_prog_linum:%u\n",
- expected.len, result.len, get_linum());
+ RET_IF(expected.len != result.len, "expected.len != result.len",
+ "expected.len:%u result.len:%u bpf_prog_linum:%ld\n",
+ expected.len, result.len, get_linum());
}
static void check_results(void)
@@ -310,8 +324,8 @@ static void check_results(void)
for (i = 0; i < NR_RESULTS; i++) {
err = bpf_map_lookup_elem(result_map, &i, &results[i]);
- CHECK(err == -1, "lookup_elem(result_map)",
- "i:%u err:%d errno:%d\n", i, err, errno);
+ RET_IF(err == -1, "lookup_elem(result_map)",
+ "i:%u err:%d errno:%d\n", i, err, errno);
}
for (i = 0; i < NR_RESULTS; i++) {
@@ -337,10 +351,10 @@ static void check_results(void)
printf(", %u", expected_results[i]);
printf("]\n");
- CHECK(expected_results[broken] != results[broken],
- "unexpected result",
- "expected_results[%u] != results[%u] bpf_prog_linum:%u\n",
- broken, broken, get_linum());
+ RET_IF(expected_results[broken] != results[broken],
+ "unexpected result",
+ "expected_results[%u] != results[%u] bpf_prog_linum:%ld\n",
+ broken, broken, get_linum());
}
static int send_data(int type, sa_family_t family, void *data, size_t len,
@@ -350,17 +364,17 @@ static int send_data(int type, sa_family_t family, void *data, size_t len,
int fd, err;
fd = socket(family, type, 0);
- CHECK(fd == -1, "socket()", "fd:%d errno:%d\n", fd, errno);
+ RET_ERR(fd == -1, "socket()", "fd:%d errno:%d\n", fd, errno);
sa46_init_loopback(&cli_sa, family);
err = bind(fd, (struct sockaddr *)&cli_sa, sizeof(cli_sa));
- CHECK(fd == -1, "bind(cli_sa)", "err:%d errno:%d\n", err, errno);
+ RET_ERR(err == -1, "bind(cli_sa)", "err:%d errno:%d\n", err, errno);
err = sendto(fd, data, len, MSG_FASTOPEN, (struct sockaddr *)&srv_sa,
sizeof(srv_sa));
- CHECK(err != len && expected >= PASS,
- "sendto()", "family:%u err:%d errno:%d expected:%d\n",
- family, err, errno, expected);
+ RET_ERR(err != len && expected >= PASS,
+ "sendto()", "family:%u err:%d errno:%d expected:%d\n",
+ family, err, errno, expected);
return fd;
}
@@ -375,47 +389,49 @@ static void do_test(int type, sa_family_t family, struct cmd *cmd,
cli_fd = send_data(type, family, cmd, cmd ? sizeof(*cmd) : 0,
expected);
+ if (cli_fd < 0)
+ return;
nev = epoll_wait(epfd, &ev, 1, expected >= PASS ? 5 : 0);
- CHECK((nev <= 0 && expected >= PASS) ||
- (nev > 0 && expected < PASS),
- "nev <> expected",
- "nev:%d expected:%d type:%d family:%d data:(%d, %d)\n",
- nev, expected, type, family,
- cmd ? cmd->reuseport_index : -1,
- cmd ? cmd->pass_on_failure : -1);
+ RET_IF((nev <= 0 && expected >= PASS) ||
+ (nev > 0 && expected < PASS),
+ "nev <> expected",
+ "nev:%d expected:%d type:%d family:%d data:(%d, %d)\n",
+ nev, expected, type, family,
+ cmd ? cmd->reuseport_index : -1,
+ cmd ? cmd->pass_on_failure : -1);
check_results();
check_data(type, family, cmd, cli_fd);
if (expected < PASS)
return;
- CHECK(expected != PASS_ERR_SK_SELECT_REUSEPORT &&
- cmd->reuseport_index != ev.data.u32,
- "check cmd->reuseport_index",
- "cmd:(%u, %u) ev.data.u32:%u\n",
- cmd->pass_on_failure, cmd->reuseport_index, ev.data.u32);
+ RET_IF(expected != PASS_ERR_SK_SELECT_REUSEPORT &&
+ cmd->reuseport_index != ev.data.u32,
+ "check cmd->reuseport_index",
+ "cmd:(%u, %u) ev.data.u32:%u\n",
+ cmd->pass_on_failure, cmd->reuseport_index, ev.data.u32);
srv_fd = sk_fds[ev.data.u32];
if (type == SOCK_STREAM) {
int new_fd = accept(srv_fd, NULL, 0);
- CHECK(new_fd == -1, "accept(srv_fd)",
- "ev.data.u32:%u new_fd:%d errno:%d\n",
- ev.data.u32, new_fd, errno);
+ RET_IF(new_fd == -1, "accept(srv_fd)",
+ "ev.data.u32:%u new_fd:%d errno:%d\n",
+ ev.data.u32, new_fd, errno);
nread = recv(new_fd, &rcv_cmd, sizeof(rcv_cmd), MSG_DONTWAIT);
- CHECK(nread != sizeof(rcv_cmd),
- "recv(new_fd)",
- "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n",
- ev.data.u32, nread, sizeof(rcv_cmd), errno);
+ RET_IF(nread != sizeof(rcv_cmd),
+ "recv(new_fd)",
+ "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n",
+ ev.data.u32, nread, sizeof(rcv_cmd), errno);
close(new_fd);
} else {
nread = recv(srv_fd, &rcv_cmd, sizeof(rcv_cmd), MSG_DONTWAIT);
- CHECK(nread != sizeof(rcv_cmd),
- "recv(sk_fds)",
- "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n",
- ev.data.u32, nread, sizeof(rcv_cmd), errno);
+ RET_IF(nread != sizeof(rcv_cmd),
+ "recv(sk_fds)",
+ "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n",
+ ev.data.u32, nread, sizeof(rcv_cmd), errno);
}
close(cli_fd);
@@ -428,18 +444,14 @@ static void test_err_inner_map(int type, sa_family_t family)
.pass_on_failure = 0,
};
- printf("%s: ", __func__);
expected_results[DROP_ERR_INNER_MAP]++;
do_test(type, family, &cmd, DROP_ERR_INNER_MAP);
- printf("OK\n");
}
static void test_err_skb_data(int type, sa_family_t family)
{
- printf("%s: ", __func__);
expected_results[DROP_ERR_SKB_DATA]++;
do_test(type, family, NULL, DROP_ERR_SKB_DATA);
- printf("OK\n");
}
static void test_err_sk_select_port(int type, sa_family_t family)
@@ -449,10 +461,8 @@ static void test_err_sk_select_port(int type, sa_family_t family)
.pass_on_failure = 0,
};
- printf("%s: ", __func__);
expected_results[DROP_ERR_SK_SELECT_REUSEPORT]++;
do_test(type, family, &cmd, DROP_ERR_SK_SELECT_REUSEPORT);
- printf("OK\n");
}
static void test_pass(int type, sa_family_t family)
@@ -460,14 +470,12 @@ static void test_pass(int type, sa_family_t family)
struct cmd cmd;
int i;
- printf("%s: ", __func__);
cmd.pass_on_failure = 0;
for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) {
expected_results[PASS]++;
cmd.reuseport_index = i;
do_test(type, family, &cmd, PASS);
}
- printf("OK\n");
}
static void test_syncookie(int type, sa_family_t family)
@@ -481,7 +489,6 @@ static void test_syncookie(int type, sa_family_t family)
if (type != SOCK_STREAM)
return;
- printf("%s: ", __func__);
/*
* +1 for TCP-SYN and
* +1 for the TCP-ACK (ack the syncookie)
@@ -497,17 +504,16 @@ static void test_syncookie(int type, sa_family_t family)
*/
err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero,
&tmp_index, BPF_ANY);
- CHECK(err == -1, "update_elem(tmp_index_ovr_map, 0, 1)",
- "err:%d errno:%d\n", err, errno);
+ RET_IF(err == -1, "update_elem(tmp_index_ovr_map, 0, 1)",
+ "err:%d errno:%d\n", err, errno);
do_test(type, family, &cmd, PASS);
err = bpf_map_lookup_elem(tmp_index_ovr_map, &index_zero,
&tmp_index);
- CHECK(err == -1 || tmp_index != -1,
- "lookup_elem(tmp_index_ovr_map)",
- "err:%d errno:%d tmp_index:%d\n",
- err, errno, tmp_index);
+ RET_IF(err == -1 || tmp_index != -1,
+ "lookup_elem(tmp_index_ovr_map)",
+ "err:%d errno:%d tmp_index:%d\n",
+ err, errno, tmp_index);
disable_syncookie();
- printf("OK\n");
}
static void test_pass_on_err(int type, sa_family_t family)
@@ -517,10 +523,8 @@ static void test_pass_on_err(int type, sa_family_t family)
.pass_on_failure = 1,
};
- printf("%s: ", __func__);
expected_results[PASS_ERR_SK_SELECT_REUSEPORT] += 1;
do_test(type, family, &cmd, PASS_ERR_SK_SELECT_REUSEPORT);
- printf("OK\n");
}
static void test_detach_bpf(int type, sa_family_t family)
@@ -532,46 +536,47 @@ static void test_detach_bpf(int type, sa_family_t family)
struct cmd cmd = {};
int optvalue = 0;
- printf("%s: ", __func__);
err = setsockopt(sk_fds[0], SOL_SOCKET, SO_DETACH_REUSEPORT_BPF,
&optvalue, sizeof(optvalue));
- CHECK(err == -1, "setsockopt(SO_DETACH_REUSEPORT_BPF)",
- "err:%d errno:%d\n", err, errno);
+ RET_IF(err == -1, "setsockopt(SO_DETACH_REUSEPORT_BPF)",
+ "err:%d errno:%d\n", err, errno);
err = setsockopt(sk_fds[1], SOL_SOCKET, SO_DETACH_REUSEPORT_BPF,
&optvalue, sizeof(optvalue));
- CHECK(err == 0 || errno != ENOENT, "setsockopt(SO_DETACH_REUSEPORT_BPF)",
- "err:%d errno:%d\n", err, errno);
+ RET_IF(err == 0 || errno != ENOENT,
+ "setsockopt(SO_DETACH_REUSEPORT_BPF)",
+ "err:%d errno:%d\n", err, errno);
for (i = 0; i < NR_RESULTS; i++) {
err = bpf_map_lookup_elem(result_map, &i, &tmp);
- CHECK(err == -1, "lookup_elem(result_map)",
- "i:%u err:%d errno:%d\n", i, err, errno);
+ RET_IF(err == -1, "lookup_elem(result_map)",
+ "i:%u err:%d errno:%d\n", i, err, errno);
nr_run_before += tmp;
}
cli_fd = send_data(type, family, &cmd, sizeof(cmd), PASS);
+ if (cli_fd < 0)
+ return;
nev = epoll_wait(epfd, &ev, 1, 5);
- CHECK(nev <= 0, "nev <= 0",
- "nev:%d expected:1 type:%d family:%d data:(0, 0)\n",
- nev, type, family);
+ RET_IF(nev <= 0, "nev <= 0",
+ "nev:%d expected:1 type:%d family:%d data:(0, 0)\n",
+ nev, type, family);
for (i = 0; i < NR_RESULTS; i++) {
err = bpf_map_lookup_elem(result_map, &i, &tmp);
- CHECK(err == -1, "lookup_elem(result_map)",
- "i:%u err:%d errno:%d\n", i, err, errno);
+ RET_IF(err == -1, "lookup_elem(result_map)",
+ "i:%u err:%d errno:%d\n", i, err, errno);
nr_run_after += tmp;
}
- CHECK(nr_run_before != nr_run_after,
- "nr_run_before != nr_run_after",
- "nr_run_before:%u nr_run_after:%u\n",
- nr_run_before, nr_run_after);
+ RET_IF(nr_run_before != nr_run_after,
+ "nr_run_before != nr_run_after",
+ "nr_run_before:%u nr_run_after:%u\n",
+ nr_run_before, nr_run_after);
- printf("OK\n");
close(cli_fd);
#else
- printf("%s: SKIP\n", __func__);
+ test__skip();
#endif
}
@@ -594,73 +599,83 @@ static void prepare_sk_fds(int type, sa_family_t family, bool inany)
*/
for (i = first; i >= 0; i--) {
sk_fds[i] = socket(family, type, 0);
- CHECK(sk_fds[i] == -1, "socket()", "sk_fds[%d]:%d errno:%d\n",
- i, sk_fds[i], errno);
+ RET_IF(sk_fds[i] == -1, "socket()", "sk_fds[%d]:%d errno:%d\n",
+ i, sk_fds[i], errno);
err = setsockopt(sk_fds[i], SOL_SOCKET, SO_REUSEPORT,
&optval, sizeof(optval));
- CHECK(err == -1, "setsockopt(SO_REUSEPORT)",
- "sk_fds[%d] err:%d errno:%d\n",
- i, err, errno);
+ RET_IF(err == -1, "setsockopt(SO_REUSEPORT)",
+ "sk_fds[%d] err:%d errno:%d\n",
+ i, err, errno);
if (i == first) {
err = setsockopt(sk_fds[i], SOL_SOCKET,
SO_ATTACH_REUSEPORT_EBPF,
&select_by_skb_data_prog,
sizeof(select_by_skb_data_prog));
- CHECK(err == -1, "setsockopt(SO_ATTACH_REUEPORT_EBPF)",
- "err:%d errno:%d\n", err, errno);
+ RET_IF(err == -1, "setsockopt(SO_ATTACH_REUEPORT_EBPF)",
+ "err:%d errno:%d\n", err, errno);
}
err = bind(sk_fds[i], (struct sockaddr *)&srv_sa, addrlen);
- CHECK(err == -1, "bind()", "sk_fds[%d] err:%d errno:%d\n",
- i, err, errno);
+ RET_IF(err == -1, "bind()", "sk_fds[%d] err:%d errno:%d\n",
+ i, err, errno);
if (type == SOCK_STREAM) {
err = listen(sk_fds[i], 10);
- CHECK(err == -1, "listen()",
- "sk_fds[%d] err:%d errno:%d\n",
- i, err, errno);
+ RET_IF(err == -1, "listen()",
+ "sk_fds[%d] err:%d errno:%d\n",
+ i, err, errno);
}
err = bpf_map_update_elem(reuseport_array, &i, &sk_fds[i],
BPF_NOEXIST);
- CHECK(err == -1, "update_elem(reuseport_array)",
- "sk_fds[%d] err:%d errno:%d\n", i, err, errno);
+ RET_IF(err == -1, "update_elem(reuseport_array)",
+ "sk_fds[%d] err:%d errno:%d\n", i, err, errno);
if (i == first) {
socklen_t addrlen = sizeof(srv_sa);
err = getsockname(sk_fds[i], (struct sockaddr *)&srv_sa,
&addrlen);
- CHECK(err == -1, "getsockname()",
- "sk_fds[%d] err:%d errno:%d\n", i, err, errno);
+ RET_IF(err == -1, "getsockname()",
+ "sk_fds[%d] err:%d errno:%d\n", i, err, errno);
}
}
epfd = epoll_create(1);
- CHECK(epfd == -1, "epoll_create(1)",
- "epfd:%d errno:%d\n", epfd, errno);
+ RET_IF(epfd == -1, "epoll_create(1)",
+ "epfd:%d errno:%d\n", epfd, errno);
ev.events = EPOLLIN;
for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) {
ev.data.u32 = i;
err = epoll_ctl(epfd, EPOLL_CTL_ADD, sk_fds[i], &ev);
- CHECK(err, "epoll_ctl(EPOLL_CTL_ADD)", "sk_fds[%d]\n", i);
+ RET_IF(err, "epoll_ctl(EPOLL_CTL_ADD)", "sk_fds[%d]\n", i);
}
}
-static void setup_per_test(int type, unsigned short family, bool inany)
+static void setup_per_test(int type, sa_family_t family, bool inany,
+ bool no_inner_map)
{
int ovr = -1, err;
prepare_sk_fds(type, family, inany);
err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero, &ovr,
BPF_ANY);
- CHECK(err == -1, "update_elem(tmp_index_ovr_map, 0, -1)",
- "err:%d errno:%d\n", err, errno);
+ RET_IF(err == -1, "update_elem(tmp_index_ovr_map, 0, -1)",
+ "err:%d errno:%d\n", err, errno);
+
+ /* Install reuseport_array to outer_map? */
+ if (no_inner_map)
+ return;
+
+ err = bpf_map_update_elem(outer_map, &index_zero, &reuseport_array,
+ BPF_ANY);
+ RET_IF(err == -1, "update_elem(outer_map, 0, reuseport_array)",
+ "err:%d errno:%d\n", err, errno);
}
-static void cleanup_per_test(void)
+static void cleanup_per_test(bool no_inner_map)
{
int i, err;
@@ -668,75 +683,124 @@ static void cleanup_per_test(void)
close(sk_fds[i]);
close(epfd);
+ /* Delete reuseport_array from outer_map? */
+ if (no_inner_map)
+ return;
+
err = bpf_map_delete_elem(outer_map, &index_zero);
- CHECK(err == -1, "delete_elem(outer_map)",
- "err:%d errno:%d\n", err, errno);
+ RET_IF(err == -1, "delete_elem(outer_map)",
+ "err:%d errno:%d\n", err, errno);
}
static void cleanup(void)
{
- close(outer_map);
- close(reuseport_array);
- bpf_object__close(obj);
+ if (outer_map != -1)
+ close(outer_map);
+ if (reuseport_array != -1)
+ close(reuseport_array);
+ if (obj)
+ bpf_object__close(obj);
}
-static void test_all(void)
+static const char *family_str(sa_family_t family)
{
- /* Extra SOCK_STREAM to test bind_inany==true */
- const int types[] = { SOCK_STREAM, SOCK_DGRAM, SOCK_STREAM };
- const char * const type_strings[] = { "TCP", "UDP", "TCP" };
- const char * const family_strings[] = { "IPv6", "IPv4" };
- const unsigned short families[] = { AF_INET6, AF_INET };
- const bool bind_inany[] = { false, false, true };
- int t, f, err;
-
- for (f = 0; f < ARRAY_SIZE(families); f++) {
- unsigned short family = families[f];
-
- for (t = 0; t < ARRAY_SIZE(types); t++) {
- bool inany = bind_inany[t];
- int type = types[t];
-
- printf("######## %s/%s %s ########\n",
- family_strings[f], type_strings[t],
- inany ? " INANY " : "LOOPBACK");
-
- setup_per_test(type, family, inany);
-
- test_err_inner_map(type, family);
-
- /* Install reuseport_array to the outer_map */
- err = bpf_map_update_elem(outer_map, &index_zero,
- &reuseport_array, BPF_ANY);
- CHECK(err == -1, "update_elem(outer_map)",
- "err:%d errno:%d\n", err, errno);
-
- test_err_skb_data(type, family);
- test_err_sk_select_port(type, family);
- test_pass(type, family);
- test_syncookie(type, family);
- test_pass_on_err(type, family);
- /* Must be the last test */
- test_detach_bpf(type, family);
-
- cleanup_per_test();
- printf("\n");
- }
+ switch (family) {
+ case AF_INET:
+ return "IPv4";
+ case AF_INET6:
+ return "IPv6";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *sotype_str(int sotype)
+{
+ switch (sotype) {
+ case SOCK_STREAM:
+ return "TCP";
+ case SOCK_DGRAM:
+ return "UDP";
+ default:
+ return "unknown";
+ }
+}
+
+#define TEST_INIT(fn, ...) { fn, #fn, __VA_ARGS__ }
+
+static void test_config(int sotype, sa_family_t family, bool inany)
+{
+ const struct test {
+ void (*fn)(int sotype, sa_family_t family);
+ const char *name;
+ bool no_inner_map;
+ } tests[] = {
+ TEST_INIT(test_err_inner_map, true /* no_inner_map */),
+ TEST_INIT(test_err_skb_data),
+ TEST_INIT(test_err_sk_select_port),
+ TEST_INIT(test_pass),
+ TEST_INIT(test_syncookie),
+ TEST_INIT(test_pass_on_err),
+ TEST_INIT(test_detach_bpf),
+ };
+ char s[MAX_TEST_NAME];
+ const struct test *t;
+
+ for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+ snprintf(s, sizeof(s), "%s/%s %s %s",
+ family_str(family), sotype_str(sotype),
+ inany ? "INANY" : "LOOPBACK", t->name);
+
+ if (!test__start_subtest(s))
+ continue;
+
+ setup_per_test(sotype, family, inany, t->no_inner_map);
+ t->fn(sotype, family);
+ cleanup_per_test(t->no_inner_map);
}
}
-int main(int argc, const char **argv)
+#define BIND_INANY true
+
+static void test_all(void)
{
- create_maps();
- prepare_bpf_obj();
+ const struct config {
+ int sotype;
+ sa_family_t family;
+ bool inany;
+ } configs[] = {
+ { SOCK_STREAM, AF_INET },
+ { SOCK_STREAM, AF_INET, BIND_INANY },
+ { SOCK_STREAM, AF_INET6 },
+ { SOCK_STREAM, AF_INET6, BIND_INANY },
+ { SOCK_DGRAM, AF_INET },
+ { SOCK_DGRAM, AF_INET6 },
+ };
+ const struct config *c;
+
+ for (c = configs; c < configs + ARRAY_SIZE(configs); c++)
+ test_config(c->sotype, c->family, c->inany);
+}
+
+void test_select_reuseport(void)
+{
+ if (create_maps())
+ goto out;
+ if (prepare_bpf_obj())
+ goto out;
+
saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL);
saved_tcp_syncookie = read_int_sysctl(TCP_SYNCOOKIE_SYSCTL);
- enable_fastopen();
- disable_syncookie();
- atexit(restore_sysctls);
+ if (saved_tcp_fo < 0 || saved_tcp_syncookie < 0)
+ goto out;
- test_all();
+ if (enable_fastopen())
+ goto out;
+ if (disable_syncookie())
+ goto out;
+ test_all();
+out:
cleanup();
- return 0;
+ restore_sysctls();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
index a2eb8db8dafb..c6d6b685a946 100644
--- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
+++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
@@ -11,6 +11,9 @@ void test_skb_ctx(void)
.cb[4] = 5,
.priority = 6,
.tstamp = 7,
+ .wire_len = 100,
+ .gso_segs = 8,
+ .mark = 9,
};
struct bpf_prog_test_run_attr tattr = {
.data_in = &pkt_v4,
@@ -91,4 +94,8 @@ void test_skb_ctx(void)
"ctx_out_tstamp",
"skb->tstamp == %lld, expected %d\n",
skb.tstamp, 8);
+ CHECK_ATTR(skb.mark != 10,
+ "ctx_out_mark",
+ "skb->mark == %u, expected %d\n",
+ skb.mark, 10);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c
new file mode 100644
index 000000000000..9264a2736018
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <test_progs.h>
+
+struct s {
+ int a;
+ long long b;
+} __attribute__((packed));
+
+#include "test_skeleton.skel.h"
+
+void test_skeleton(void)
+{
+ int duration = 0, err;
+ struct test_skeleton *skel;
+ struct test_skeleton__bss *bss;
+ struct test_skeleton__kconfig *kcfg;
+
+ skel = test_skeleton__open();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ return;
+
+ if (CHECK(skel->kconfig, "skel_kconfig", "kconfig is mmaped()!\n"))
+ goto cleanup;
+
+ err = test_skeleton__load(skel);
+ if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
+ goto cleanup;
+
+ bss = skel->bss;
+ bss->in1 = 1;
+ bss->in2 = 2;
+ bss->in3 = 3;
+ bss->in4 = 4;
+ bss->in5.a = 5;
+ bss->in5.b = 6;
+ kcfg = skel->kconfig;
+
+ err = test_skeleton__attach(skel);
+ if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ goto cleanup;
+
+ /* trigger tracepoint */
+ usleep(1);
+
+ CHECK(bss->out1 != 1, "res1", "got %d != exp %d\n", bss->out1, 1);
+ CHECK(bss->out2 != 2, "res2", "got %lld != exp %d\n", bss->out2, 2);
+ CHECK(bss->out3 != 3, "res3", "got %d != exp %d\n", (int)bss->out3, 3);
+ CHECK(bss->out4 != 4, "res4", "got %lld != exp %d\n", bss->out4, 4);
+ CHECK(bss->handler_out5.a != 5, "res5", "got %d != exp %d\n",
+ bss->handler_out5.a, 5);
+ CHECK(bss->handler_out5.b != 6, "res6", "got %lld != exp %d\n",
+ bss->handler_out5.b, 6);
+
+ CHECK(bss->bpf_syscall != kcfg->CONFIG_BPF_SYSCALL, "ext1",
+ "got %d != exp %d\n", bss->bpf_syscall, kcfg->CONFIG_BPF_SYSCALL);
+ CHECK(bss->kern_ver != kcfg->LINUX_KERNEL_VERSION, "ext2",
+ "got %d != exp %d\n", bss->kern_ver, kcfg->LINUX_KERNEL_VERSION);
+
+cleanup:
+ test_skeleton__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
index d841dced971f..e8399ae50e77 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
@@ -1,16 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include "test_stacktrace_build_id.skel.h"
void test_stacktrace_build_id(void)
{
+
int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
- const char *prog_name = "tracepoint/random/urandom_read";
- const char *file = "./test_stacktrace_build_id.o";
- int err, prog_fd, stack_trace_len;
+ struct test_stacktrace_build_id *skel;
+ int err, stack_trace_len;
__u32 key, previous_key, val, duration = 0;
- struct bpf_program *prog;
- struct bpf_object *obj;
- struct bpf_link *link = NULL;
char buf[256];
int i, j;
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@@ -18,43 +16,24 @@ void test_stacktrace_build_id(void)
int retry = 1;
retry:
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ skel = test_stacktrace_build_id__open_and_load();
+ if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
return;
- prog = bpf_object__find_program_by_title(obj, prog_name);
- if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
- goto close_prog;
-
- link = bpf_program__attach_tracepoint(prog, "random", "urandom_read");
- if (CHECK(IS_ERR(link), "attach_tp", "err %ld\n", PTR_ERR(link)))
- goto close_prog;
+ err = test_stacktrace_build_id__attach(skel);
+ if (CHECK(err, "attach_tp", "err %d\n", err))
+ goto cleanup;
/* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
- err, errno))
- goto disable_pmu;
-
- stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
- if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
+ control_map_fd = bpf_map__fd(skel->maps.control_map);
+ stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
+ stackmap_fd = bpf_map__fd(skel->maps.stackmap);
+ stack_amap_fd = bpf_map__fd(skel->maps.stack_amap);
if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")))
- goto disable_pmu;
+ goto cleanup;
if (CHECK_FAIL(system("./urandom_read")))
- goto disable_pmu;
+ goto cleanup;
/* disable stack trace collection */
key = 0;
val = 1;
@@ -66,23 +45,23 @@ retry:
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
"err %d errno %d\n", err, errno))
- goto disable_pmu;
+ goto cleanup;
err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
"err %d errno %d\n", err, errno))
- goto disable_pmu;
+ goto cleanup;
err = extract_build_id(buf, 256);
if (CHECK(err, "get build_id with readelf",
"err %d errno %d\n", err, errno))
- goto disable_pmu;
+ goto cleanup;
err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
if (CHECK(err, "get_next_key from stackmap",
"err %d, errno %d\n", err, errno))
- goto disable_pmu;
+ goto cleanup;
do {
char build_id[64];
@@ -90,7 +69,7 @@ retry:
err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
if (CHECK(err, "lookup_elem from stackmap",
"err %d, errno %d\n", err, errno))
- goto disable_pmu;
+ goto cleanup;
for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
id_offs[i].offset != 0) {
@@ -108,8 +87,7 @@ retry:
* try it one more time.
*/
if (build_id_matches < 1 && retry--) {
- bpf_link__destroy(link);
- bpf_object__close(obj);
+ test_stacktrace_build_id__destroy(skel);
printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
__func__);
goto retry;
@@ -117,17 +95,14 @@ retry:
if (CHECK(build_id_matches < 1, "build id match",
"Didn't find expected build ID from the map\n"))
- goto disable_pmu;
+ goto cleanup;
- stack_trace_len = PERF_MAX_STACK_DEPTH
- * sizeof(struct bpf_stack_build_id);
+ stack_trace_len = PERF_MAX_STACK_DEPTH *
+ sizeof(struct bpf_stack_build_id);
err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
"err %d errno %d\n", err, errno);
-disable_pmu:
- bpf_link__destroy(link);
-
-close_prog:
- bpf_object__close(obj);
+cleanup:
+ test_stacktrace_build_id__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
index f62aa0eb959b..8974450a4bdb 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include "test_stacktrace_build_id.skel.h"
static __u64 read_perf_max_sample_freq(void)
{
@@ -16,19 +17,15 @@ static __u64 read_perf_max_sample_freq(void)
void test_stacktrace_build_id_nmi(void)
{
- int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
- const char *prog_name = "tracepoint/random/urandom_read";
- const char *file = "./test_stacktrace_build_id.o";
- int err, pmu_fd, prog_fd;
+ int control_map_fd, stackid_hmap_fd, stackmap_fd;
+ struct test_stacktrace_build_id *skel;
+ int err, pmu_fd;
struct perf_event_attr attr = {
.freq = 1,
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
};
__u32 key, previous_key, val, duration = 0;
- struct bpf_program *prog;
- struct bpf_object *obj;
- struct bpf_link *link;
char buf[256];
int i, j;
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@@ -38,13 +35,16 @@ void test_stacktrace_build_id_nmi(void)
attr.sample_freq = read_perf_max_sample_freq();
retry:
- err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ skel = test_stacktrace_build_id__open();
+ if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
- prog = bpf_object__find_program_by_title(obj, prog_name);
- if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
- goto close_prog;
+ /* override program type */
+ bpf_program__set_perf_event(skel->progs.oncpu);
+
+ err = test_stacktrace_build_id__load(skel);
+ if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
+ goto cleanup;
pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
0 /* cpu 0 */, -1 /* group id */,
@@ -52,40 +52,25 @@ retry:
if (CHECK(pmu_fd < 0, "perf_event_open",
"err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
pmu_fd, errno))
- goto close_prog;
+ goto cleanup;
- link = bpf_program__attach_perf_event(prog, pmu_fd);
- if (CHECK(IS_ERR(link), "attach_perf_event",
- "err %ld\n", PTR_ERR(link))) {
+ skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
+ pmu_fd);
+ if (CHECK(IS_ERR(skel->links.oncpu), "attach_perf_event",
+ "err %ld\n", PTR_ERR(skel->links.oncpu))) {
close(pmu_fd);
- goto close_prog;
+ goto cleanup;
}
/* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
- err, errno))
- goto disable_pmu;
-
- stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
- if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
+ control_map_fd = bpf_map__fd(skel->maps.control_map);
+ stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
+ stackmap_fd = bpf_map__fd(skel->maps.stackmap);
if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")))
- goto disable_pmu;
+ goto cleanup;
if (CHECK_FAIL(system("taskset 0x1 ./urandom_read 100000")))
- goto disable_pmu;
+ goto cleanup;
/* disable stack trace collection */
key = 0;
val = 1;
@@ -97,23 +82,23 @@ retry:
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
"err %d errno %d\n", err, errno))
- goto disable_pmu;
+ goto cleanup;
err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
"err %d errno %d\n", err, errno))
- goto disable_pmu;
+ goto cleanup;
err = extract_build_id(buf, 256);
if (CHECK(err, "get build_id with readelf",
"err %d errno %d\n", err, errno))
- goto disable_pmu;
+ goto cleanup;
err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
if (CHECK(err, "get_next_key from stackmap",
"err %d, errno %d\n", err, errno))
- goto disable_pmu;
+ goto cleanup;
do {
char build_id[64];
@@ -121,7 +106,7 @@ retry:
err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
if (CHECK(err, "lookup_elem from stackmap",
"err %d, errno %d\n", err, errno))
- goto disable_pmu;
+ goto cleanup;
for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
id_offs[i].offset != 0) {
@@ -139,8 +124,7 @@ retry:
* try it one more time.
*/
if (build_id_matches < 1 && retry--) {
- bpf_link__destroy(link);
- bpf_object__close(obj);
+ test_stacktrace_build_id__destroy(skel);
printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
__func__);
goto retry;
@@ -148,7 +132,7 @@ retry:
if (CHECK(build_id_matches < 1, "build id match",
"Didn't find expected build ID from the map\n"))
- goto disable_pmu;
+ goto cleanup;
/*
* We intentionally skip compare_stack_ips(). This is because we
@@ -157,8 +141,6 @@ retry:
* BPF_STACK_BUILD_ID_IP;
*/
-disable_pmu:
- bpf_link__destroy(link);
-close_prog:
- bpf_object__close(obj);
+cleanup:
+ test_stacktrace_build_id__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c
new file mode 100644
index 000000000000..7185bee16fe4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_xdp_perf(void)
+{
+ const char *file = "./xdp_dummy.o";
+ __u32 duration, retval, size;
+ struct bpf_object *obj;
+ char in[128], out[128];
+ int err, prog_fd;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (CHECK_FAIL(err))
+ return;
+
+ err = bpf_prog_test_run(prog_fd, 1000000, &in[0], 128,
+ out, &size, &retval, &duration);
+
+ CHECK(err || retval != XDP_PASS || size != 128,
+ "xdp-perf",
+ "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___equiv_zero_sz_arr.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___equiv_zero_sz_arr.c
new file mode 100644
index 000000000000..65eac371b061
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___equiv_zero_sz_arr.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_arrays___equiv_zero_sz_arr x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_zero_sz_arr.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_zero_sz_arr.c
new file mode 100644
index 000000000000..ecda2b545ac2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_zero_sz_arr.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_arrays___err_bad_zero_sz_arr x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___fixed_arr.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___fixed_arr.c
new file mode 100644
index 000000000000..fe1d01232c22
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___fixed_arr.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_arrays___fixed_arr x) {}
diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
index 9311489e14b2..6d598cfbdb3e 100644
--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
+++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
@@ -327,6 +327,7 @@ struct core_reloc_arrays_output {
char b123;
int c1c;
int d00d;
+ int f10c;
};
struct core_reloc_arrays_substruct {
@@ -339,6 +340,7 @@ struct core_reloc_arrays {
char b[2][3][4];
struct core_reloc_arrays_substruct c[3];
struct core_reloc_arrays_substruct d[1][2];
+ struct core_reloc_arrays_substruct f[][2];
};
/* bigger array dimensions */
@@ -347,6 +349,7 @@ struct core_reloc_arrays___diff_arr_dim {
char b[3][4][5];
struct core_reloc_arrays_substruct c[4];
struct core_reloc_arrays_substruct d[2][3];
+ struct core_reloc_arrays_substruct f[1][3];
};
/* different size of array's value (struct) */
@@ -363,6 +366,29 @@ struct core_reloc_arrays___diff_arr_val_sz {
int d;
int __padding2;
} d[1][2];
+ struct {
+ int __padding1;
+ int c;
+ int __padding2;
+ } f[][2];
+};
+
+struct core_reloc_arrays___equiv_zero_sz_arr {
+ int a[5];
+ char b[2][3][4];
+ struct core_reloc_arrays_substruct c[3];
+ struct core_reloc_arrays_substruct d[1][2];
+ /* equivalent to flexible array */
+ struct core_reloc_arrays_substruct f[0][2];
+};
+
+struct core_reloc_arrays___fixed_arr {
+ int a[5];
+ char b[2][3][4];
+ struct core_reloc_arrays_substruct c[3];
+ struct core_reloc_arrays_substruct d[1][2];
+ /* not a flexible array anymore, but within access bounds */
+ struct core_reloc_arrays_substruct f[1][2];
};
struct core_reloc_arrays___err_too_small {
@@ -370,6 +396,7 @@ struct core_reloc_arrays___err_too_small {
char b[2][3][4];
struct core_reloc_arrays_substruct c[3];
struct core_reloc_arrays_substruct d[1][2];
+ struct core_reloc_arrays_substruct f[][2];
};
struct core_reloc_arrays___err_too_shallow {
@@ -377,6 +404,7 @@ struct core_reloc_arrays___err_too_shallow {
char b[2][3]; /* this one lacks one dimension */
struct core_reloc_arrays_substruct c[3];
struct core_reloc_arrays_substruct d[1][2];
+ struct core_reloc_arrays_substruct f[][2];
};
struct core_reloc_arrays___err_non_array {
@@ -384,6 +412,7 @@ struct core_reloc_arrays___err_non_array {
char b[2][3][4];
struct core_reloc_arrays_substruct c[3];
struct core_reloc_arrays_substruct d[1][2];
+ struct core_reloc_arrays_substruct f[][2];
};
struct core_reloc_arrays___err_wrong_val_type {
@@ -391,6 +420,16 @@ struct core_reloc_arrays___err_wrong_val_type {
char b[2][3][4];
int c[3]; /* value is not a struct */
struct core_reloc_arrays_substruct d[1][2];
+ struct core_reloc_arrays_substruct f[][2];
+};
+
+struct core_reloc_arrays___err_bad_zero_sz_arr {
+ /* zero-sized array, but not at the end */
+ struct core_reloc_arrays_substruct f[0][2];
+ int a[5];
+ char b[2][3][4];
+ struct core_reloc_arrays_substruct c[3];
+ struct core_reloc_arrays_substruct d[1][2];
};
/*
diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c
index 534621e38906..221b69700625 100644
--- a/tools/testing/selftests/bpf/progs/test_attach_probe.c
+++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c
@@ -5,46 +5,36 @@
#include <linux/bpf.h>
#include "bpf_helpers.h"
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 4);
- __type(key, int);
- __type(value, int);
-} results_map SEC(".maps");
+int kprobe_res = 0;
+int kretprobe_res = 0;
+int uprobe_res = 0;
+int uretprobe_res = 0;
SEC("kprobe/sys_nanosleep")
-int handle_sys_nanosleep_entry(struct pt_regs *ctx)
+int handle_kprobe(struct pt_regs *ctx)
{
- const int key = 0, value = 1;
-
- bpf_map_update_elem(&results_map, &key, &value, 0);
+ kprobe_res = 1;
return 0;
}
SEC("kretprobe/sys_nanosleep")
-int handle_sys_getpid_return(struct pt_regs *ctx)
+int handle_kretprobe(struct pt_regs *ctx)
{
- const int key = 1, value = 2;
-
- bpf_map_update_elem(&results_map, &key, &value, 0);
+ kretprobe_res = 2;
return 0;
}
SEC("uprobe/trigger_func")
-int handle_uprobe_entry(struct pt_regs *ctx)
+int handle_uprobe(struct pt_regs *ctx)
{
- const int key = 2, value = 3;
-
- bpf_map_update_elem(&results_map, &key, &value, 0);
+ uprobe_res = 3;
return 0;
}
SEC("uretprobe/trigger_func")
-int handle_uprobe_return(struct pt_regs *ctx)
+int handle_uretprobe(struct pt_regs *ctx)
{
- const int key = 3, value = 4;
-
- bpf_map_update_elem(&results_map, &key, &value, 0);
+ uretprobe_res = 4;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_core_extern.c b/tools/testing/selftests/bpf/progs/test_core_extern.c
new file mode 100644
index 000000000000..9bfc91d9d004
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_extern.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <linux/ptrace.h>
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+/* non-existing BPF helper, to test dead code elimination */
+static int (*bpf_missing_helper)(const void *arg1, int arg2) = (void *) 999;
+
+extern int LINUX_KERNEL_VERSION __kconfig;
+extern bool CONFIG_BPF_SYSCALL __kconfig; /* strong */
+extern enum libbpf_tristate CONFIG_TRISTATE __kconfig __weak;
+extern bool CONFIG_BOOL __kconfig __weak;
+extern char CONFIG_CHAR __kconfig __weak;
+extern uint16_t CONFIG_USHORT __kconfig __weak;
+extern int CONFIG_INT __kconfig __weak;
+extern uint64_t CONFIG_ULONG __kconfig __weak;
+extern const char CONFIG_STR[8] __kconfig __weak;
+extern uint64_t CONFIG_MISSING __kconfig __weak;
+
+uint64_t kern_ver = -1;
+uint64_t bpf_syscall = -1;
+uint64_t tristate_val = -1;
+uint64_t bool_val = -1;
+uint64_t char_val = -1;
+uint64_t ushort_val = -1;
+uint64_t int_val = -1;
+uint64_t ulong_val = -1;
+char str_val[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
+uint64_t missing_val = -1;
+
+SEC("raw_tp/sys_enter")
+int handle_sys_enter(struct pt_regs *ctx)
+{
+ int i;
+
+ kern_ver = LINUX_KERNEL_VERSION;
+ bpf_syscall = CONFIG_BPF_SYSCALL;
+ tristate_val = CONFIG_TRISTATE;
+ bool_val = CONFIG_BOOL;
+ char_val = CONFIG_CHAR;
+ ushort_val = CONFIG_USHORT;
+ int_val = CONFIG_INT;
+ ulong_val = CONFIG_ULONG;
+
+ for (i = 0; i < sizeof(CONFIG_STR); i++) {
+ str_val[i] = CONFIG_STR[i];
+ }
+
+ if (CONFIG_MISSING)
+ /* invalid, but dead code - never executed */
+ missing_val = bpf_missing_helper(ctx, 123);
+ else
+ missing_val = 0xDEADC0DE;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c b/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
index 89951b684282..053b86f6b53f 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
@@ -18,6 +18,7 @@ struct core_reloc_arrays_output {
char b123;
int c1c;
int d00d;
+ int f01c;
};
struct core_reloc_arrays_substruct {
@@ -30,6 +31,7 @@ struct core_reloc_arrays {
char b[2][3][4];
struct core_reloc_arrays_substruct c[3];
struct core_reloc_arrays_substruct d[1][2];
+ struct core_reloc_arrays_substruct f[][2];
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
@@ -40,18 +42,16 @@ int test_core_arrays(void *ctx)
struct core_reloc_arrays *in = (void *)&data.in;
struct core_reloc_arrays_output *out = (void *)&data.out;
- /* in->a[2] */
if (CORE_READ(&out->a2, &in->a[2]))
return 1;
- /* in->b[1][2][3] */
if (CORE_READ(&out->b123, &in->b[1][2][3]))
return 1;
- /* in->c[1].c */
if (CORE_READ(&out->c1c, &in->c[1].c))
return 1;
- /* in->d[0][0].d */
if (CORE_READ(&out->d00d, &in->d[0][0].d))
return 1;
+ if (CORE_READ(&out->f01c, &in->f[0][1].c))
+ return 1;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
index ea7d84f01235..b1f09f5bb1cf 100644
--- a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
@@ -62,7 +62,7 @@ struct {
goto done; \
})
-SEC("select_by_skb_data")
+SEC("sk_reuseport")
int _select_by_skb_data(struct sk_reuseport_md *reuse_md)
{
__u32 linum, index = 0, flags = 0, index_zero = 0;
diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
index 2a9f4c736ebc..e18da87fe84f 100644
--- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
@@ -17,6 +17,12 @@ int process(struct __sk_buff *skb)
}
skb->priority++;
skb->tstamp++;
+ skb->mark++;
+
+ if (skb->wire_len != 100)
+ return 1;
+ if (skb->gso_segs != 8)
+ return 1;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c
new file mode 100644
index 000000000000..4f69aac5635f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_skeleton.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct s {
+ int a;
+ long long b;
+} __attribute__((packed));
+
+int in1 = 0;
+long long in2 = 0;
+char in3 = '\0';
+long long in4 __attribute__((aligned(64))) = 0;
+struct s in5 = {};
+
+long long out2 = 0;
+char out3 = 0;
+long long out4 = 0;
+int out1 = 0;
+
+extern bool CONFIG_BPF_SYSCALL __kconfig;
+extern int LINUX_KERNEL_VERSION __kconfig;
+bool bpf_syscall = 0;
+int kern_ver = 0;
+
+SEC("raw_tp/sys_enter")
+int handler(const void *ctx)
+{
+ static volatile struct s out5;
+
+ out1 = in1;
+ out2 = in2;
+ out3 = in3;
+ out4 = in4;
+ out5 = in5;
+
+ bpf_syscall = CONFIG_BPF_SYSCALL;
+ kern_ver = LINUX_KERNEL_VERSION;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_cgroup_attach.c b/tools/testing/selftests/bpf/test_cgroup_attach.c
deleted file mode 100644
index 7671909ee1cb..000000000000
--- a/tools/testing/selftests/bpf/test_cgroup_attach.c
+++ /dev/null
@@ -1,571 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/* eBPF example program:
- *
- * - Creates arraymap in kernel with 4 bytes keys and 8 byte values
- *
- * - Loads eBPF program
- *
- * The eBPF program accesses the map passed in to store two pieces of
- * information. The number of invocations of the program, which maps
- * to the number of packets received, is stored to key 0. Key 1 is
- * incremented on each iteration by the number of bytes stored in
- * the skb. The program also stores the number of received bytes
- * in the cgroup storage.
- *
- * - Attaches the new program to a cgroup using BPF_PROG_ATTACH
- *
- * - Every second, reads map[0] and map[1] to see how many bytes and
- * packets were seen on any socket of tasks in the given cgroup.
- */
-
-#define _GNU_SOURCE
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <assert.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <unistd.h>
-#include <linux/filter.h>
-
-#include <linux/bpf.h>
-#include <bpf/bpf.h>
-
-#include "bpf_util.h"
-#include "bpf_rlimit.h"
-#include "cgroup_helpers.h"
-
-#define FOO "/foo"
-#define BAR "/foo/bar/"
-#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
-
-char bpf_log_buf[BPF_LOG_BUF_SIZE];
-
-#ifdef DEBUG
-#define debug(args...) printf(args)
-#else
-#define debug(args...)
-#endif
-
-static int prog_load(int verdict)
-{
- int ret;
- struct bpf_insn prog[] = {
- BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
- BPF_EXIT_INSN(),
- };
- size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
-
- ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
- prog, insns_cnt, "GPL", 0,
- bpf_log_buf, BPF_LOG_BUF_SIZE);
-
- if (ret < 0) {
- log_err("Loading program");
- printf("Output from verifier:\n%s\n-------\n", bpf_log_buf);
- return 0;
- }
- return ret;
-}
-
-static int test_foo_bar(void)
-{
- int drop_prog, allow_prog, foo = 0, bar = 0, rc = 0;
-
- allow_prog = prog_load(1);
- if (!allow_prog)
- goto err;
-
- drop_prog = prog_load(0);
- if (!drop_prog)
- goto err;
-
- if (setup_cgroup_environment())
- goto err;
-
- /* Create cgroup /foo, get fd, and join it */
- foo = create_and_get_cgroup(FOO);
- if (foo < 0)
- goto err;
-
- if (join_cgroup(FOO))
- goto err;
-
- if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS,
- BPF_F_ALLOW_OVERRIDE)) {
- log_err("Attaching prog to /foo");
- goto err;
- }
-
- debug("Attached DROP prog. This ping in cgroup /foo should fail...\n");
- assert(system(PING_CMD) != 0);
-
- /* Create cgroup /foo/bar, get fd, and join it */
- bar = create_and_get_cgroup(BAR);
- if (bar < 0)
- goto err;
-
- if (join_cgroup(BAR))
- goto err;
-
- debug("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n");
- assert(system(PING_CMD) != 0);
-
- if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
- BPF_F_ALLOW_OVERRIDE)) {
- log_err("Attaching prog to /foo/bar");
- goto err;
- }
-
- debug("Attached PASS prog. This ping in cgroup /foo/bar should pass...\n");
- assert(system(PING_CMD) == 0);
-
- if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) {
- log_err("Detaching program from /foo/bar");
- goto err;
- }
-
- debug("Detached PASS from /foo/bar while DROP is attached to /foo.\n"
- "This ping in cgroup /foo/bar should fail...\n");
- assert(system(PING_CMD) != 0);
-
- if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
- BPF_F_ALLOW_OVERRIDE)) {
- log_err("Attaching prog to /foo/bar");
- goto err;
- }
-
- if (bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) {
- log_err("Detaching program from /foo");
- goto err;
- }
-
- debug("Attached PASS from /foo/bar and detached DROP from /foo.\n"
- "This ping in cgroup /foo/bar should pass...\n");
- assert(system(PING_CMD) == 0);
-
- if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
- BPF_F_ALLOW_OVERRIDE)) {
- log_err("Attaching prog to /foo/bar");
- goto err;
- }
-
- if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) {
- errno = 0;
- log_err("Unexpected success attaching prog to /foo/bar");
- goto err;
- }
-
- if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) {
- log_err("Detaching program from /foo/bar");
- goto err;
- }
-
- if (!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) {
- errno = 0;
- log_err("Unexpected success in double detach from /foo");
- goto err;
- }
-
- if (bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) {
- log_err("Attaching non-overridable prog to /foo");
- goto err;
- }
-
- if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) {
- errno = 0;
- log_err("Unexpected success attaching non-overridable prog to /foo/bar");
- goto err;
- }
-
- if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
- BPF_F_ALLOW_OVERRIDE)) {
- errno = 0;
- log_err("Unexpected success attaching overridable prog to /foo/bar");
- goto err;
- }
-
- if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS,
- BPF_F_ALLOW_OVERRIDE)) {
- errno = 0;
- log_err("Unexpected success attaching overridable prog to /foo");
- goto err;
- }
-
- if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) {
- log_err("Attaching different non-overridable prog to /foo");
- goto err;
- }
-
- goto out;
-
-err:
- rc = 1;
-
-out:
- close(foo);
- close(bar);
- cleanup_cgroup_environment();
- if (!rc)
- printf("#override:PASS\n");
- else
- printf("#override:FAIL\n");
- return rc;
-}
-
-static int map_fd = -1;
-
-static int prog_load_cnt(int verdict, int val)
-{
- int cgroup_storage_fd, percpu_cgroup_storage_fd;
-
- if (map_fd < 0)
- map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
- if (map_fd < 0) {
- printf("failed to create map '%s'\n", strerror(errno));
- return -1;
- }
-
- cgroup_storage_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE,
- sizeof(struct bpf_cgroup_storage_key), 8, 0, 0);
- if (cgroup_storage_fd < 0) {
- printf("failed to create map '%s'\n", strerror(errno));
- return -1;
- }
-
- percpu_cgroup_storage_fd = bpf_create_map(
- BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
- sizeof(struct bpf_cgroup_storage_key), 8, 0, 0);
- if (percpu_cgroup_storage_fd < 0) {
- printf("failed to create map '%s'\n", strerror(errno));
- return -1;
- }
-
- struct bpf_insn prog[] = {
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
- BPF_LD_MAP_FD(BPF_REG_1, map_fd),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = val */
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
-
- BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
- BPF_MOV64_IMM(BPF_REG_1, val),
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),
-
- BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
- BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
-
- BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
- BPF_EXIT_INSN(),
- };
- size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
- int ret;
-
- ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
- prog, insns_cnt, "GPL", 0,
- bpf_log_buf, BPF_LOG_BUF_SIZE);
-
- if (ret < 0) {
- log_err("Loading program");
- printf("Output from verifier:\n%s\n-------\n", bpf_log_buf);
- return 0;
- }
- close(cgroup_storage_fd);
- return ret;
-}
-
-
-static int test_multiprog(void)
-{
- __u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id;
- int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0;
- int drop_prog, allow_prog[6] = {}, rc = 0;
- unsigned long long value;
- int i = 0;
-
- for (i = 0; i < 6; i++) {
- allow_prog[i] = prog_load_cnt(1, 1 << i);
- if (!allow_prog[i])
- goto err;
- }
- drop_prog = prog_load_cnt(0, 1);
- if (!drop_prog)
- goto err;
-
- if (setup_cgroup_environment())
- goto err;
-
- cg1 = create_and_get_cgroup("/cg1");
- if (cg1 < 0)
- goto err;
- cg2 = create_and_get_cgroup("/cg1/cg2");
- if (cg2 < 0)
- goto err;
- cg3 = create_and_get_cgroup("/cg1/cg2/cg3");
- if (cg3 < 0)
- goto err;
- cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4");
- if (cg4 < 0)
- goto err;
- cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5");
- if (cg5 < 0)
- goto err;
-
- if (join_cgroup("/cg1/cg2/cg3/cg4/cg5"))
- goto err;
-
- if (bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
- BPF_F_ALLOW_MULTI)) {
- log_err("Attaching prog to cg1");
- goto err;
- }
- if (!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
- BPF_F_ALLOW_MULTI)) {
- log_err("Unexpected success attaching the same prog to cg1");
- goto err;
- }
- if (bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS,
- BPF_F_ALLOW_MULTI)) {
- log_err("Attaching prog2 to cg1");
- goto err;
- }
- if (bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS,
- BPF_F_ALLOW_OVERRIDE)) {
- log_err("Attaching prog to cg2");
- goto err;
- }
- if (bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS,
- BPF_F_ALLOW_MULTI)) {
- log_err("Attaching prog to cg3");
- goto err;
- }
- if (bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS,
- BPF_F_ALLOW_OVERRIDE)) {
- log_err("Attaching prog to cg4");
- goto err;
- }
- if (bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0)) {
- log_err("Attaching prog to cg5");
- goto err;
- }
- assert(system(PING_CMD) == 0);
- assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
- assert(value == 1 + 2 + 8 + 32);
-
- /* query the number of effective progs in cg5 */
- assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
- NULL, NULL, &prog_cnt) == 0);
- assert(prog_cnt == 4);
- /* retrieve prog_ids of effective progs in cg5 */
- assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
- &attach_flags, prog_ids, &prog_cnt) == 0);
- assert(prog_cnt == 4);
- assert(attach_flags == 0);
- saved_prog_id = prog_ids[0];
- /* check enospc handling */
- prog_ids[0] = 0;
- prog_cnt = 2;
- assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
- &attach_flags, prog_ids, &prog_cnt) == -1 &&
- errno == ENOSPC);
- assert(prog_cnt == 4);
- /* check that prog_ids are returned even when buffer is too small */
- assert(prog_ids[0] == saved_prog_id);
- /* retrieve prog_id of single attached prog in cg5 */
- prog_ids[0] = 0;
- assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0,
- NULL, prog_ids, &prog_cnt) == 0);
- assert(prog_cnt == 1);
- assert(prog_ids[0] == saved_prog_id);
-
- /* detach bottom program and ping again */
- if (bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS)) {
- log_err("Detaching prog from cg5");
- goto err;
- }
- value = 0;
- assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);
- assert(system(PING_CMD) == 0);
- assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
- assert(value == 1 + 2 + 8 + 16);
-
- /* detach 3rd from bottom program and ping again */
- errno = 0;
- if (!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS)) {
- log_err("Unexpected success on detach from cg3");
- goto err;
- }
- if (bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS)) {
- log_err("Detaching from cg3");
- goto err;
- }
- value = 0;
- assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);
- assert(system(PING_CMD) == 0);
- assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
- assert(value == 1 + 2 + 16);
-
- /* detach 2nd from bottom program and ping again */
- if (bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS)) {
- log_err("Detaching prog from cg4");
- goto err;
- }
- value = 0;
- assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);
- assert(system(PING_CMD) == 0);
- assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
- assert(value == 1 + 2 + 4);
-
- prog_cnt = 4;
- assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
- &attach_flags, prog_ids, &prog_cnt) == 0);
- assert(prog_cnt == 3);
- assert(attach_flags == 0);
- assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0,
- NULL, prog_ids, &prog_cnt) == 0);
- assert(prog_cnt == 0);
- goto out;
-err:
- rc = 1;
-
-out:
- for (i = 0; i < 6; i++)
- if (allow_prog[i] > 0)
- close(allow_prog[i]);
- close(cg1);
- close(cg2);
- close(cg3);
- close(cg4);
- close(cg5);
- cleanup_cgroup_environment();
- if (!rc)
- printf("#multi:PASS\n");
- else
- printf("#multi:FAIL\n");
- return rc;
-}
-
-static int test_autodetach(void)
-{
- __u32 prog_cnt = 4, attach_flags;
- int allow_prog[2] = {0};
- __u32 prog_ids[2] = {0};
- int cg = 0, i, rc = -1;
- void *ptr = NULL;
- int attempts;
-
- for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
- allow_prog[i] = prog_load_cnt(1, 1 << i);
- if (!allow_prog[i])
- goto err;
- }
-
- if (setup_cgroup_environment())
- goto err;
-
- /* create a cgroup, attach two programs and remember their ids */
- cg = create_and_get_cgroup("/cg_autodetach");
- if (cg < 0)
- goto err;
-
- if (join_cgroup("/cg_autodetach"))
- goto err;
-
- for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
- if (bpf_prog_attach(allow_prog[i], cg, BPF_CGROUP_INET_EGRESS,
- BPF_F_ALLOW_MULTI)) {
- log_err("Attaching prog[%d] to cg:egress", i);
- goto err;
- }
- }
-
- /* make sure that programs are attached and run some traffic */
- assert(bpf_prog_query(cg, BPF_CGROUP_INET_EGRESS, 0, &attach_flags,
- prog_ids, &prog_cnt) == 0);
- assert(system(PING_CMD) == 0);
-
- /* allocate some memory (4Mb) to pin the original cgroup */
- ptr = malloc(4 * (1 << 20));
- if (!ptr)
- goto err;
-
- /* close programs and cgroup fd */
- for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
- close(allow_prog[i]);
- allow_prog[i] = 0;
- }
-
- close(cg);
- cg = 0;
-
- /* leave the cgroup and remove it. don't detach programs */
- cleanup_cgroup_environment();
-
- /* wait for the asynchronous auto-detachment.
- * wait for no more than 5 sec and give up.
- */
- for (i = 0; i < ARRAY_SIZE(prog_ids); i++) {
- for (attempts = 5; attempts >= 0; attempts--) {
- int fd = bpf_prog_get_fd_by_id(prog_ids[i]);
-
- if (fd < 0)
- break;
-
- /* don't leave the fd open */
- close(fd);
-
- if (!attempts)
- goto err;
-
- sleep(1);
- }
- }
-
- rc = 0;
-err:
- for (i = 0; i < ARRAY_SIZE(allow_prog); i++)
- if (allow_prog[i] > 0)
- close(allow_prog[i]);
- if (cg)
- close(cg);
- free(ptr);
- cleanup_cgroup_environment();
- if (!rc)
- printf("#autodetach:PASS\n");
- else
- printf("#autodetach:FAIL\n");
- return rc;
-}
-
-int main(void)
-{
- int (*tests[])(void) = {
- test_foo_bar,
- test_multiprog,
- test_autodetach,
- };
- int errors = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tests); i++)
- if (tests[i]())
- errors++;
-
- if (errors)
- printf("test_cgroup_attach:FAIL\n");
- else
- printf("test_cgroup_attach:PASS\n");
-
- return errors ? EXIT_FAILURE : EXIT_SUCCESS;
-}
diff --git a/tools/testing/selftests/bpf/test_cpp.cpp b/tools/testing/selftests/bpf/test_cpp.cpp
index f0eb2727b766..6fe23a10d48a 100644
--- a/tools/testing/selftests/bpf/test_cpp.cpp
+++ b/tools/testing/selftests/bpf/test_cpp.cpp
@@ -1,12 +1,16 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#include <iostream>
#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
+#include "test_core_extern.skel.h"
/* do nothing, just make sure we can link successfully */
int main(int argc, char *argv[])
{
+ struct test_core_extern *skel;
+
/* libbpf.h */
libbpf_set_print(NULL);
@@ -16,5 +20,11 @@ int main(int argc, char *argv[])
/* btf.h */
btf__new(NULL, 0);
+ /* BPF skeleton */
+ skel = test_core_extern__open_and_load();
+ test_core_extern__destroy(skel);
+
+ std::cout << "DONE!" << std::endl;
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 8477df835979..de1fdaa4e7b4 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -100,6 +100,7 @@ extern struct ipv6_packet pkt_v6;
#define _CHECK(condition, tag, duration, format...) ({ \
int __ret = !!(condition); \
+ int __save_errno = errno; \
if (__ret) { \
test__fail(); \
printf("%s:FAIL:%s ", __func__, tag); \
@@ -108,15 +109,18 @@ extern struct ipv6_packet pkt_v6;
printf("%s:PASS:%s %d nsec\n", \
__func__, tag, duration); \
} \
+ errno = __save_errno; \
__ret; \
})
#define CHECK_FAIL(condition) ({ \
int __ret = !!(condition); \
+ int __save_errno = errno; \
if (__ret) { \
test__fail(); \
printf("%s:FAIL:%d\n", __func__, __LINE__); \
} \
+ errno = __save_errno; \
__ret; \
})
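
Saving and restoring errno in _CHECK() and CHECK_FAIL() matters because both the pass and fail branches call printf(), which may itself modify errno; without the save, a test that inspects errno right after a CHECK() could see whatever printf() left behind rather than the value set by the syscall under test. A hedged usage sketch for a prog_tests/*.c file; map_fd, the tag strings, and check_missing_key() are made up, and duration is the local variable the CHECK() wrapper references:

static void check_missing_key(int map_fd)
{
        __u32 key = 0, value, duration = 0;
        int err;

        /* expect lookup of an absent key in an empty hash map to fail with ENOENT */
        err = bpf_map_lookup_elem(map_fd, &key, &value);
        if (CHECK(err != -1, "lookup", "expected failure, got %d\n", err))
                return;

        /* only reliable because _CHECK() restored errno around its printf() */
        CHECK(errno != ENOENT, "lookup_errno", "unexpected errno %d\n", errno);
}
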