Diffstat (limited to 'tools/testing/selftests')
-rw-r--r--  tools/testing/selftests/bpf/Makefile                                        |  25
-rw-r--r--  tools/testing/selftests/bpf/bpf_helpers.h                                   |   5
-rw-r--r--  tools/testing/selftests/bpf/config                                          |   1
-rw-r--r--  tools/testing/selftests/bpf/flow_dissector_load.c                           |   2
-rw-r--r--  tools/testing/selftests/bpf/flow_dissector_load.h                           |  24
-rw-r--r--  tools/testing/selftests/bpf/map_tests/sk_storage_map.c                      | 629
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/flow_dissector.c                     | 113
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c         |  48
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c |  42
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c          |  80
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_flow.c                                |  79
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sock_fields_kern.c                  |  53
-rw-r--r--  tools/testing/selftests/bpf/progs/test_tc_tunnel.c                         |  64
-rw-r--r--  tools/testing/selftests/bpf/test_btf.c                                      |  63
-rw-r--r--  tools/testing/selftests/bpf/test_btf.h                                      |  69
-rw-r--r--  tools/testing/selftests/bpf/test_maps.c                                     |  18
-rw-r--r--  tools/testing/selftests/bpf/test_maps.h                                     |  17
-rw-r--r--  tools/testing/selftests/bpf/test_sock_fields.c                              | 115
-rwxr-xr-x  tools/testing/selftests/bpf/test_tc_tunnel.sh                               |  20
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c                                 |  55
-rw-r--r--  tools/testing/selftests/bpf/verifier/raw_tp_writable.c                      |  34
-rw-r--r--  tools/testing/selftests/bpf/verifier/sock.c                                 | 116
22 files changed, 1514 insertions(+), 158 deletions(-)
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index f9d83ba7843e..66f2dca1dee1 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -74,6 +74,8 @@ all: $(TEST_CUSTOM_PROGS)
$(OUTPUT)/urandom_read: $(OUTPUT)/%: %.c
$(CC) -o $@ $< -Wl,--build-id
+$(OUTPUT)/test_maps: map_tests/*.c
+
BPFOBJ := $(OUTPUT)/libbpf.a
$(TEST_GEN_PROGS): $(BPFOBJ)
@@ -232,6 +234,27 @@ $(PROG_TESTS_H): $(PROG_TESTS_DIR) $(PROG_TESTS_FILES)
echo '#endif' \
) > $(PROG_TESTS_H))
+TEST_MAPS_CFLAGS := -I. -I$(OUTPUT)
+MAP_TESTS_DIR = $(OUTPUT)/map_tests
+$(MAP_TESTS_DIR):
+ mkdir -p $@
+MAP_TESTS_H := $(MAP_TESTS_DIR)/tests.h
+test_maps.c: $(MAP_TESTS_H)
+$(OUTPUT)/test_maps: CFLAGS += $(TEST_MAPS_CFLAGS)
+MAP_TESTS_FILES := $(wildcard map_tests/*.c)
+$(MAP_TESTS_H): $(MAP_TESTS_DIR) $(MAP_TESTS_FILES)
+ $(shell ( cd map_tests/; \
+ echo '/* Generated header, do not edit */'; \
+ echo '#ifdef DECLARE'; \
+ ls *.c 2> /dev/null | \
+ sed -e 's@\([^\.]*\)\.c@extern void test_\1(void);@'; \
+ echo '#endif'; \
+ echo '#ifdef CALL'; \
+ ls *.c 2> /dev/null | \
+ sed -e 's@\([^\.]*\)\.c@test_\1();@'; \
+ echo '#endif' \
+ ) > $(MAP_TESTS_H))
+
VERIFIER_TESTS_H := $(OUTPUT)/verifier/tests.h
test_verifier.c: $(VERIFIER_TESTS_H)
$(OUTPUT)/test_verifier: CFLAGS += $(TEST_VERIFIER_CFLAGS)
@@ -251,4 +274,4 @@ $(OUTPUT)/verifier/tests.h: $(VERIFIER_TESTS_DIR) $(VERIFIER_TEST_FILES)
) > $(VERIFIER_TESTS_H))
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR) \
- $(VERIFIER_TESTS_H) $(PROG_TESTS_H)
+ $(VERIFIER_TESTS_H) $(PROG_TESTS_H) $(MAP_TESTS_H)
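The map_tests rule above mirrors the existing prog_tests/verifier header generation: each map_tests/*.c file becomes one extern declaration and one call site in a generated header that test_maps.c includes twice. As a sketch (assuming the directory contains only sk_storage_map.c), the generated $(OUTPUT)/map_tests/tests.h would read:

/* Generated header, do not edit */
#ifdef DECLARE
extern void test_sk_storage_map(void);
#endif
#ifdef CALL
test_sk_storage_map();
#endif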
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index 59e221586cf7..6e80b66d7fb1 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -211,6 +211,11 @@ static int (*bpf_strtol)(const char *buf, unsigned long long buf_len,
static int (*bpf_strtoul)(const char *buf, unsigned long long buf_len,
unsigned long long flags, unsigned long *res) =
(void *) BPF_FUNC_strtoul;
+static void *(*bpf_sk_storage_get)(void *map, struct bpf_sock *sk,
+ void *value, __u64 flags) =
+ (void *) BPF_FUNC_sk_storage_get;
+static int (*bpf_sk_storage_delete)(void *map, struct bpf_sock *sk) =
+ (void *) BPF_FUNC_sk_storage_delete;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
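A minimal sketch of how a BPF program could use the two new helpers (the map name sk_stg and the cgroup_skb section are illustrative, not part of this patch; BPF_SK_STORAGE_GET_F_CREATE allocates the per-socket value on first access, as test_sock_fields_kern.c below does):

struct bpf_map_def SEC("maps") sk_stg = {
	.type = BPF_MAP_TYPE_SK_STORAGE,
	.key_size = sizeof(int),
	.value_size = sizeof(__u32),
	.max_entries = 0,
	.map_flags = BPF_F_NO_PREALLOC,
};

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	__u32 init_val = 0, *cnt;

	if (!sk)
		return 1;

	sk = bpf_sk_fullsock(sk);
	if (!sk)
		return 1;

	/* Creates the per-socket value on first access */
	cnt = bpf_sk_storage_get(&sk_stg, sk, &init_val,
				 BPF_SK_STORAGE_GET_F_CREATE);
	if (cnt)
		(*cnt)++;

	return 1;
}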
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 8c976476f6fd..f7a0744db31e 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -33,3 +33,4 @@ CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
+CONFIG_IPV6_SIT=m
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c
index 7136ab9ffa73..3fd83b9dc1bf 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.c
+++ b/tools/testing/selftests/bpf/flow_dissector_load.c
@@ -26,7 +26,7 @@ static void load_and_attach_program(void)
struct bpf_object *obj;
ret = bpf_flow_load(&obj, cfg_path_name, cfg_section_name,
- cfg_map_name, &prog_fd);
+ cfg_map_name, NULL, &prog_fd, NULL);
if (ret)
error(1, 0, "bpf_flow_load %s", cfg_path_name);
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h
index 41dd6959feb0..daeaeb518894 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.h
+++ b/tools/testing/selftests/bpf/flow_dissector_load.h
@@ -9,10 +9,12 @@ static inline int bpf_flow_load(struct bpf_object **obj,
const char *path,
const char *section_name,
const char *map_name,
- int *prog_fd)
+ const char *keys_map_name,
+ int *prog_fd,
+ int *keys_fd)
{
struct bpf_program *prog, *main_prog;
- struct bpf_map *prog_array;
+ struct bpf_map *prog_array, *keys;
int prog_array_fd;
int ret, fd, i;
@@ -23,19 +25,29 @@ static inline int bpf_flow_load(struct bpf_object **obj,
main_prog = bpf_object__find_program_by_title(*obj, section_name);
if (!main_prog)
- return ret;
+ return -1;
*prog_fd = bpf_program__fd(main_prog);
if (*prog_fd < 0)
- return ret;
+ return -1;
prog_array = bpf_object__find_map_by_name(*obj, map_name);
if (!prog_array)
- return ret;
+ return -1;
prog_array_fd = bpf_map__fd(prog_array);
if (prog_array_fd < 0)
- return ret;
+ return -1;
+
+ if (keys_map_name && keys_fd) {
+ keys = bpf_object__find_map_by_name(*obj, keys_map_name);
+ if (!keys)
+ return -1;
+
+ *keys_fd = bpf_map__fd(keys);
+ if (*keys_fd < 0)
+ return -1;
+ }
i = 0;
bpf_object__for_each_program(prog, *obj) {
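With the extended signature, callers that do not need the exported flow keys pass NULL for the two new arguments, while the flow dissector prog_test retrieves the keys map fd; roughly:

/* no keys map wanted (flow_dissector_load.c above) */
err = bpf_flow_load(&obj, cfg_path_name, cfg_section_name,
		    cfg_map_name, NULL, &prog_fd, NULL);

/* keys map wanted (prog_tests/flow_dissector.c below) */
err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
		    "jmp_table", "last_dissection", &prog_fd, &keys_fd);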
diff --git a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
new file mode 100644
index 000000000000..e569edc679d8
--- /dev/null
+++ b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <linux/compiler.h>
+#include <linux/err.h>
+
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <linux/btf.h>
+#include <unistd.h>
+#include <signal.h>
+#include <errno.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include <test_btf.h>
+#include <test_maps.h>
+
+static struct bpf_create_map_attr xattr = {
+ .name = "sk_storage_map",
+ .map_type = BPF_MAP_TYPE_SK_STORAGE,
+ .map_flags = BPF_F_NO_PREALLOC,
+ .max_entries = 0,
+ .key_size = 4,
+ .value_size = 8,
+ .btf_key_type_id = 1,
+ .btf_value_type_id = 3,
+ .btf_fd = -1,
+};
+
+static unsigned int nr_sk_threads_done;
+static unsigned int nr_sk_threads_err;
+static unsigned int nr_sk_per_thread = 4096;
+static unsigned int nr_sk_threads = 4;
+static int sk_storage_map = -1;
+static unsigned int stop;
+static int runtime_s = 5;
+
+static bool is_stopped(void)
+{
+ return READ_ONCE(stop);
+}
+
+static unsigned int threads_err(void)
+{
+ return READ_ONCE(nr_sk_threads_err);
+}
+
+static void notify_thread_err(void)
+{
+ __sync_add_and_fetch(&nr_sk_threads_err, 1);
+}
+
+static bool wait_for_threads_err(void)
+{
+ while (!is_stopped() && !threads_err())
+ usleep(500);
+
+ return !is_stopped();
+}
+
+static unsigned int threads_done(void)
+{
+ return READ_ONCE(nr_sk_threads_done);
+}
+
+static void notify_thread_done(void)
+{
+ __sync_add_and_fetch(&nr_sk_threads_done, 1);
+}
+
+static void notify_thread_redo(void)
+{
+ __sync_sub_and_fetch(&nr_sk_threads_done, 1);
+}
+
+static bool wait_for_threads_done(void)
+{
+ while (threads_done() != nr_sk_threads && !is_stopped() &&
+ !threads_err())
+ usleep(50);
+
+ return !is_stopped() && !threads_err();
+}
+
+static bool wait_for_threads_redo(void)
+{
+ while (threads_done() && !is_stopped() && !threads_err())
+ usleep(50);
+
+ return !is_stopped() && !threads_err();
+}
+
+static bool wait_for_map(void)
+{
+ while (READ_ONCE(sk_storage_map) == -1 && !is_stopped())
+ usleep(50);
+
+ return !is_stopped();
+}
+
+static bool wait_for_map_close(void)
+{
+ while (READ_ONCE(sk_storage_map) != -1 && !is_stopped())
+ ;
+
+ return !is_stopped();
+}
+
+static int load_btf(void)
+{
+ const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
+ __u32 btf_raw_types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* struct bpf_spin_lock */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
+ BTF_MEMBER_ENC(15, 1, 0), /* int val; */
+ /* struct val */ /* [3] */
+ BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
+ BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
+ BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
+ };
+ struct btf_header btf_hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_len = sizeof(btf_raw_types),
+ .str_off = sizeof(btf_raw_types),
+ .str_len = sizeof(btf_str_sec),
+ };
+ __u8 raw_btf[sizeof(struct btf_header) + sizeof(btf_raw_types) +
+ sizeof(btf_str_sec)];
+
+ memcpy(raw_btf, &btf_hdr, sizeof(btf_hdr));
+ memcpy(raw_btf + sizeof(btf_hdr), btf_raw_types, sizeof(btf_raw_types));
+ memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types),
+ btf_str_sec, sizeof(btf_str_sec));
+
+ return bpf_load_btf(raw_btf, sizeof(raw_btf), 0, 0, 0);
+}
+
+static int create_sk_storage_map(void)
+{
+ int btf_fd, map_fd;
+
+ btf_fd = load_btf();
+ CHECK(btf_fd == -1, "bpf_load_btf", "btf_fd:%d errno:%d\n",
+ btf_fd, errno);
+ xattr.btf_fd = btf_fd;
+
+ map_fd = bpf_create_map_xattr(&xattr);
+ xattr.btf_fd = -1;
+ close(btf_fd);
+ CHECK(map_fd == -1,
+ "bpf_create_map_xattr()", "errno:%d\n", errno);
+
+ return map_fd;
+}
+
+static void *insert_close_thread(void *arg)
+{
+ struct {
+ int cnt;
+ int lock;
+ } value = { .cnt = 0xeB9F, .lock = 0, };
+ int i, map_fd, err, *sk_fds;
+
+ sk_fds = malloc(sizeof(*sk_fds) * nr_sk_per_thread);
+ if (!sk_fds) {
+ notify_thread_err();
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < nr_sk_per_thread; i++)
+ sk_fds[i] = -1;
+
+ while (!is_stopped()) {
+ if (!wait_for_map())
+ goto close_all;
+
+ map_fd = READ_ONCE(sk_storage_map);
+ for (i = 0; i < nr_sk_per_thread && !is_stopped(); i++) {
+ sk_fds[i] = socket(AF_INET6, SOCK_STREAM, 0);
+ if (sk_fds[i] == -1) {
+ err = -errno;
+ fprintf(stderr, "socket(): errno:%d\n", errno);
+ goto errout;
+ }
+ err = bpf_map_update_elem(map_fd, &sk_fds[i], &value,
+ BPF_NOEXIST);
+ if (err) {
+ err = -errno;
+ fprintf(stderr,
+ "bpf_map_update_elem(): errno:%d\n",
+ errno);
+ goto errout;
+ }
+ }
+
+ notify_thread_done();
+ wait_for_map_close();
+
+close_all:
+ for (i = 0; i < nr_sk_per_thread; i++) {
+ close(sk_fds[i]);
+ sk_fds[i] = -1;
+ }
+
+ notify_thread_redo();
+ }
+
+ free(sk_fds);
+ return NULL;
+
+errout:
+ for (i = 0; i < nr_sk_per_thread && sk_fds[i] != -1; i++)
+ close(sk_fds[i]);
+ free(sk_fds);
+ notify_thread_err();
+ return ERR_PTR(err);
+}
+
+static int do_sk_storage_map_stress_free(void)
+{
+ int i, map_fd = -1, err = 0, nr_threads_created = 0;
+ pthread_t *sk_thread_ids;
+ void *thread_ret;
+
+ sk_thread_ids = malloc(sizeof(pthread_t) * nr_sk_threads);
+ if (!sk_thread_ids) {
+ fprintf(stderr, "malloc(sk_threads): NULL\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_sk_threads; i++) {
+ err = pthread_create(&sk_thread_ids[i], NULL,
+ insert_close_thread, NULL);
+ if (err) {
+ err = -errno;
+ goto done;
+ }
+ nr_threads_created++;
+ }
+
+ while (!is_stopped()) {
+ map_fd = create_sk_storage_map();
+ WRITE_ONCE(sk_storage_map, map_fd);
+
+ if (!wait_for_threads_done())
+ break;
+
+ WRITE_ONCE(sk_storage_map, -1);
+ close(map_fd);
+ map_fd = -1;
+
+ if (!wait_for_threads_redo())
+ break;
+ }
+
+done:
+ WRITE_ONCE(stop, 1);
+ for (i = 0; i < nr_threads_created; i++) {
+ pthread_join(sk_thread_ids[i], &thread_ret);
+ if (IS_ERR(thread_ret) && !err) {
+ err = PTR_ERR(thread_ret);
+ fprintf(stderr, "threads#%u: err:%d\n", i, err);
+ }
+ }
+ free(sk_thread_ids);
+
+ if (map_fd != -1)
+ close(map_fd);
+
+ return err;
+}
+
+static void *update_thread(void *arg)
+{
+ struct {
+ int cnt;
+ int lock;
+ } value = { .cnt = 0xeB9F, .lock = 0, };
+ int map_fd = READ_ONCE(sk_storage_map);
+ int sk_fd = *(int *)arg;
+ int err = 0; /* Suppress compiler false alarm */
+
+ while (!is_stopped()) {
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
+ if (err && errno != EAGAIN) {
+ err = -errno;
+ fprintf(stderr, "bpf_map_update_elem: %d %d\n",
+ err, errno);
+ break;
+ }
+ }
+
+ if (!is_stopped()) {
+ notify_thread_err();
+ return ERR_PTR(err);
+ }
+
+ return NULL;
+}
+
+static void *delete_thread(void *arg)
+{
+ int map_fd = READ_ONCE(sk_storage_map);
+ int sk_fd = *(int *)arg;
+ int err = 0; /* Suppress compiler false alarm */
+
+ while (!is_stopped()) {
+ err = bpf_map_delete_elem(map_fd, &sk_fd);
+ if (err && errno != ENOENT) {
+ err = -errno;
+ fprintf(stderr, "bpf_map_delete_elem: %d %d\n",
+ err, errno);
+ break;
+ }
+ }
+
+ if (!is_stopped()) {
+ notify_thread_err();
+ return ERR_PTR(err);
+ }
+
+ return NULL;
+}
+
+static int do_sk_storage_map_stress_change(void)
+{
+ int i, sk_fd, map_fd = -1, err = 0, nr_threads_created = 0;
+ pthread_t *sk_thread_ids;
+ void *thread_ret;
+
+ sk_thread_ids = malloc(sizeof(pthread_t) * nr_sk_threads);
+ if (!sk_thread_ids) {
+ fprintf(stderr, "malloc(sk_threads): NULL\n");
+ return -ENOMEM;
+ }
+
+ sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (sk_fd == -1) {
+ err = -errno;
+ goto done;
+ }
+
+ map_fd = create_sk_storage_map();
+ WRITE_ONCE(sk_storage_map, map_fd);
+
+ for (i = 0; i < nr_sk_threads; i++) {
+ if (i & 0x1)
+ err = pthread_create(&sk_thread_ids[i], NULL,
+ update_thread, &sk_fd);
+ else
+ err = pthread_create(&sk_thread_ids[i], NULL,
+ delete_thread, &sk_fd);
+ if (err) {
+ err = -errno;
+ goto done;
+ }
+ nr_threads_created++;
+ }
+
+ wait_for_threads_err();
+
+done:
+ WRITE_ONCE(stop, 1);
+ for (i = 0; i < nr_threads_created; i++) {
+ pthread_join(sk_thread_ids[i], &thread_ret);
+ if (IS_ERR(thread_ret) && !err) {
+ err = PTR_ERR(thread_ret);
+ fprintf(stderr, "threads#%u: err:%d\n", i, err);
+ }
+ }
+ free(sk_thread_ids);
+
+ if (sk_fd != -1)
+ close(sk_fd);
+ close(map_fd);
+
+ return err;
+}
+
+static void stop_handler(int signum)
+{
+ if (signum != SIGALRM)
+ printf("stopping...\n");
+ WRITE_ONCE(stop, 1);
+}
+
+#define BPF_SK_STORAGE_MAP_TEST_NR_THREADS "BPF_SK_STORAGE_MAP_TEST_NR_THREADS"
+#define BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD "BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD"
+#define BPF_SK_STORAGE_MAP_TEST_RUNTIME_S "BPF_SK_STORAGE_MAP_TEST_RUNTIME_S"
+#define BPF_SK_STORAGE_MAP_TEST_NAME "BPF_SK_STORAGE_MAP_TEST_NAME"
+
+static void test_sk_storage_map_stress_free(void)
+{
+ struct rlimit rlim_old, rlim_new = {};
+ int err;
+
+ getrlimit(RLIMIT_NOFILE, &rlim_old);
+
+ signal(SIGTERM, stop_handler);
+ signal(SIGINT, stop_handler);
+ if (runtime_s > 0) {
+ signal(SIGALRM, stop_handler);
+ alarm(runtime_s);
+ }
+
+ if (rlim_old.rlim_cur < nr_sk_threads * nr_sk_per_thread) {
+ rlim_new.rlim_cur = nr_sk_threads * nr_sk_per_thread + 128;
+ rlim_new.rlim_max = rlim_new.rlim_cur + 128;
+ err = setrlimit(RLIMIT_NOFILE, &rlim_new);
+ CHECK(err, "setrlimit(RLIMIT_NOFILE)", "rlim_new:%lu errno:%d",
+ rlim_new.rlim_cur, errno);
+ }
+
+ err = do_sk_storage_map_stress_free();
+
+ signal(SIGTERM, SIG_DFL);
+ signal(SIGINT, SIG_DFL);
+ if (runtime_s > 0) {
+ signal(SIGALRM, SIG_DFL);
+ alarm(0);
+ }
+
+ if (rlim_new.rlim_cur)
+ setrlimit(RLIMIT_NOFILE, &rlim_old);
+
+ CHECK(err, "test_sk_storage_map_stress_free", "err:%d\n", err);
+}
+
+static void test_sk_storage_map_stress_change(void)
+{
+ int err;
+
+ signal(SIGTERM, stop_handler);
+ signal(SIGINT, stop_handler);
+ if (runtime_s > 0) {
+ signal(SIGALRM, stop_handler);
+ alarm(runtime_s);
+ }
+
+ err = do_sk_storage_map_stress_change();
+
+ signal(SIGTERM, SIG_DFL);
+ signal(SIGINT, SIG_DFL);
+ if (runtime_s > 0) {
+ signal(SIGALRM, SIG_DFL);
+ alarm(0);
+ }
+
+ CHECK(err, "test_sk_storage_map_stress_change", "err:%d\n", err);
+}
+
+static void test_sk_storage_map_basic(void)
+{
+ struct {
+ int cnt;
+ int lock;
+ } value = { .cnt = 0xeB9f, .lock = 0, }, lookup_value;
+ struct bpf_create_map_attr bad_xattr;
+ int btf_fd, map_fd, sk_fd, err;
+
+ btf_fd = load_btf();
+ CHECK(btf_fd == -1, "bpf_load_btf", "btf_fd:%d errno:%d\n",
+ btf_fd, errno);
+ xattr.btf_fd = btf_fd;
+
+ sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
+ CHECK(sk_fd == -1, "socket()", "sk_fd:%d errno:%d\n",
+ sk_fd, errno);
+
+ map_fd = bpf_create_map_xattr(&xattr);
+ CHECK(map_fd == -1, "bpf_create_map_xattr(good_xattr)",
+ "map_fd:%d errno:%d\n", map_fd, errno);
+
+ /* Add new elem */
+ memcpy(&lookup_value, &value, sizeof(value));
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value,
+ BPF_NOEXIST | BPF_F_LOCK);
+ CHECK(err, "bpf_map_update_elem(BPF_NOEXIST|BPF_F_LOCK)",
+ "err:%d errno:%d\n", err, errno);
+ err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
+ BPF_F_LOCK);
+ CHECK(err || lookup_value.cnt != value.cnt,
+ "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
+ "err:%d errno:%d cnt:%x(%x)\n",
+ err, errno, lookup_value.cnt, value.cnt);
+
+ /* Bump the cnt and update with BPF_EXIST | BPF_F_LOCK */
+ value.cnt += 1;
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value,
+ BPF_EXIST | BPF_F_LOCK);
+ CHECK(err, "bpf_map_update_elem(BPF_EXIST|BPF_F_LOCK)",
+ "err:%d errno:%d\n", err, errno);
+ err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
+ BPF_F_LOCK);
+ CHECK(err || lookup_value.cnt != value.cnt,
+ "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
+ "err:%d errno:%d cnt:%x(%x)\n",
+ err, errno, lookup_value.cnt, value.cnt);
+
+ /* Bump the cnt and update with BPF_EXIST */
+ value.cnt += 1;
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_EXIST);
+ CHECK(err, "bpf_map_update_elem(BPF_EXIST)",
+ "err:%d errno:%d\n", err, errno);
+ err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
+ BPF_F_LOCK);
+ CHECK(err || lookup_value.cnt != value.cnt,
+ "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
+ "err:%d errno:%d cnt:%x(%x)\n",
+ err, errno, lookup_value.cnt, value.cnt);
+
+ /* Update with BPF_NOEXIST */
+ value.cnt += 1;
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value,
+ BPF_NOEXIST | BPF_F_LOCK);
+ CHECK(!err || errno != EEXIST,
+ "bpf_map_update_elem(BPF_NOEXIST|BPF_F_LOCK)",
+ "err:%d errno:%d\n", err, errno);
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_NOEXIST);
+ CHECK(!err || errno != EEXIST, "bpf_map_update_elem(BPF_NOEXIST)",
+ "err:%d errno:%d\n", err, errno);
+ value.cnt -= 1;
+ err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
+ BPF_F_LOCK);
+ CHECK(err || lookup_value.cnt != value.cnt,
+ "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
+ "err:%d errno:%d cnt:%x(%x)\n",
+ err, errno, lookup_value.cnt, value.cnt);
+
+ /* Bump the cnt again and update with map_flags == 0 */
+ value.cnt += 1;
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
+ CHECK(err, "bpf_map_update_elem()", "err:%d errno:%d\n",
+ err, errno);
+ err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
+ BPF_F_LOCK);
+ CHECK(err || lookup_value.cnt != value.cnt,
+ "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
+ "err:%d errno:%d cnt:%x(%x)\n",
+ err, errno, lookup_value.cnt, value.cnt);
+
+ /* Test delete elem */
+ err = bpf_map_delete_elem(map_fd, &sk_fd);
+ CHECK(err, "bpf_map_delete_elem()", "err:%d errno:%d\n",
+ err, errno);
+ err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
+ BPF_F_LOCK);
+ CHECK(!err || errno != ENOENT,
+ "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
+ "err:%d errno:%d\n", err, errno);
+ err = bpf_map_delete_elem(map_fd, &sk_fd);
+ CHECK(!err || errno != ENOENT, "bpf_map_delete_elem()",
+ "err:%d errno:%d\n", err, errno);
+
+ memcpy(&bad_xattr, &xattr, sizeof(xattr));
+ bad_xattr.btf_key_type_id = 0;
+ err = bpf_create_map_xattr(&bad_xattr);
+ CHECK(!err || errno != EINVAL, "bpf_create_map_xattr(bad_xattr)",
+ "err:%d errno:%d\n", err, errno);
+
+ memcpy(&bad_xattr, &xattr, sizeof(xattr));
+ bad_xattr.btf_key_type_id = 3;
+ err = bpf_create_map_xattr(&bad_xattr);
+ CHECK(!err || errno != EINVAL, "bpf_create_map_xattr(bad_xattr)",
+ "err:%d errno:%d\n", err, errno);
+
+ memcpy(&bad_xattr, &xattr, sizeof(xattr));
+ bad_xattr.max_entries = 1;
+ err = bpf_create_map_xattr(&bad_xattr);
+ CHECK(!err || errno != EINVAL, "bpf_create_map_xattr(bad_xattr)",
+ "err:%d errno:%d\n", err, errno);
+
+ memcpy(&bad_xattr, &xattr, sizeof(xattr));
+ bad_xattr.map_flags = 0;
+ err = bpf_create_map_xattr(&bad_xattr);
+ CHECK(!err || errno != EINVAL, "bpf_create_map_xattr(bad_xattr)",
+ "err:%d errno:%d\n", err, errno);
+
+ xattr.btf_fd = -1;
+ close(btf_fd);
+ close(map_fd);
+ close(sk_fd);
+}
+
+void test_sk_storage_map(void)
+{
+ const char *test_name, *env_opt;
+ bool test_ran = false;
+
+ test_name = getenv(BPF_SK_STORAGE_MAP_TEST_NAME);
+
+ env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_NR_THREADS);
+ if (env_opt)
+ nr_sk_threads = atoi(env_opt);
+
+ env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD);
+ if (env_opt)
+ nr_sk_per_thread = atoi(env_opt);
+
+ env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_RUNTIME_S);
+ if (env_opt)
+ runtime_s = atoi(env_opt);
+
+ if (!test_name || !strcmp(test_name, "basic")) {
+ test_sk_storage_map_basic();
+ test_ran = true;
+ }
+ if (!test_name || !strcmp(test_name, "stress_free")) {
+ test_sk_storage_map_stress_free();
+ test_ran = true;
+ }
+ if (!test_name || !strcmp(test_name, "stress_change")) {
+ test_sk_storage_map_stress_change();
+ test_ran = true;
+ }
+
+ if (test_ran)
+ printf("%s:PASS\n", __func__);
+ else
+ CHECK(1, "Invalid test_name", "%s\n", test_name);
+}
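Since the stress parameters are read from the four environment variables above, a longer run of just the free-stress test could look like BPF_SK_STORAGE_MAP_TEST_NAME=stress_free BPF_SK_STORAGE_MAP_TEST_RUNTIME_S=30 ./test_maps (illustrative invocation; the test is linked into test_maps by the Makefile rule above).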
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index 126319f9a97c..8b54adfd6264 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include <error.h>
+#include <linux/if.h>
+#include <linux/if_tun.h>
#define CHECK_FLOW_KEYS(desc, got, expected) \
CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \
@@ -79,8 +82,8 @@ struct test tests[] = {
.tcp.doff = 5,
},
.keys = {
- .nhoff = 0,
- .thoff = sizeof(struct iphdr),
+ .nhoff = ETH_HLEN,
+ .thoff = ETH_HLEN + sizeof(struct iphdr),
.addr_proto = ETH_P_IP,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IP),
@@ -95,8 +98,8 @@ struct test tests[] = {
.tcp.doff = 5,
},
.keys = {
- .nhoff = 0,
- .thoff = sizeof(struct ipv6hdr),
+ .nhoff = ETH_HLEN,
+ .thoff = ETH_HLEN + sizeof(struct ipv6hdr),
.addr_proto = ETH_P_IPV6,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IPV6),
@@ -113,8 +116,8 @@ struct test tests[] = {
.tcp.doff = 5,
},
.keys = {
- .nhoff = VLAN_HLEN,
- .thoff = VLAN_HLEN + sizeof(struct iphdr),
+ .nhoff = ETH_HLEN + VLAN_HLEN,
+ .thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
.addr_proto = ETH_P_IP,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IP),
@@ -131,8 +134,9 @@ struct test tests[] = {
.tcp.doff = 5,
},
.keys = {
- .nhoff = VLAN_HLEN * 2,
- .thoff = VLAN_HLEN * 2 + sizeof(struct ipv6hdr),
+ .nhoff = ETH_HLEN + VLAN_HLEN * 2,
+ .thoff = ETH_HLEN + VLAN_HLEN * 2 +
+ sizeof(struct ipv6hdr),
.addr_proto = ETH_P_IPV6,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IPV6),
@@ -140,13 +144,73 @@ struct test tests[] = {
},
};
+static int create_tap(const char *ifname)
+{
+ struct ifreq ifr = {
+ .ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
+ };
+ int fd, ret;
+
+ strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
+
+ fd = open("/dev/net/tun", O_RDWR);
+ if (fd < 0)
+ return -1;
+
+ ret = ioctl(fd, TUNSETIFF, &ifr);
+ if (ret) {
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
+static int tx_tap(int fd, void *pkt, size_t len)
+{
+ struct iovec iov[] = {
+ {
+ .iov_len = len,
+ .iov_base = pkt,
+ },
+ };
+ return writev(fd, iov, ARRAY_SIZE(iov));
+}
+
+static int ifup(const char *ifname)
+{
+ struct ifreq ifr = {};
+ int sk, ret;
+
+ strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
+
+ sk = socket(PF_INET, SOCK_DGRAM, 0);
+ if (sk < 0)
+ return -1;
+
+ ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
+ if (ret) {
+ close(sk);
+ return -1;
+ }
+
+ ifr.ifr_flags |= IFF_UP;
+ ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
+ if (ret) {
+ close(sk);
+ return -1;
+ }
+
+ close(sk);
+ return 0;
+}
+
void test_flow_dissector(void)
{
+ int i, err, prog_fd, keys_fd = -1, tap_fd;
struct bpf_object *obj;
- int i, err, prog_fd;
+ __u32 duration = 0;
err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
- "jmp_table", &prog_fd);
+ "jmp_table", "last_dissection", &prog_fd, &keys_fd);
if (err) {
error_cnt++;
return;
@@ -171,5 +235,34 @@ void test_flow_dissector(void)
CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
}
+ /* Do the same tests but for skb-less flow dissector.
+ * We use a known path in the net/tun driver that calls
+ * eth_get_headlen and we manually export bpf_flow_keys
+ * via BPF map in this case.
+ */
+
+ err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
+ CHECK(err, "bpf_prog_attach", "err %d errno %d", err, errno);
+
+ tap_fd = create_tap("tap0");
+ CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d", tap_fd, errno);
+ err = ifup("tap0");
+ CHECK(err, "ifup", "err %d errno %d", err, errno);
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ struct bpf_flow_keys flow_keys = {};
+ struct bpf_prog_test_run_attr tattr = {};
+ __u32 key = 0;
+
+ err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
+ CHECK(err < 0, "tx_tap", "err %d errno %d", err, errno);
+
+ err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
+ CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);
+
+ CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err);
+ CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
+ }
+
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c
new file mode 100644
index 000000000000..dc5ef155ec28
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_flow_dissector_load_bytes(void)
+{
+ struct bpf_flow_keys flow_keys;
+ __u32 duration = 0, retval, size;
+ struct bpf_insn prog[] = {
+ // BPF_REG_1 - 1st argument: context
+ // BPF_REG_2 - 2nd argument: offset, start at first byte
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ // BPF_REG_3 - 3rd argument: destination, reserve byte on stack
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_3, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -1),
+ // BPF_REG_4 - 4th argument: copy one byte
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ // bpf_skb_load_bytes(ctx, sizeof(pkt_v4), ptr, 1)
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_skb_load_bytes),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ // if (ret == 0) return BPF_DROP (2)
+ BPF_MOV64_IMM(BPF_REG_0, BPF_DROP),
+ BPF_EXIT_INSN(),
+ // if (ret != 0) return BPF_OK (0)
+ BPF_MOV64_IMM(BPF_REG_0, BPF_OK),
+ BPF_EXIT_INSN(),
+ };
+ int fd, err;
+
+ /* make sure bpf_skb_load_bytes is not allowed from skb-less context
+ */
+ fd = bpf_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog,
+ ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
+ CHECK(fd < 0,
+ "flow_dissector-bpf_skb_load_bytes-load",
+ "fd %d errno %d\n",
+ fd, errno);
+
+ err = bpf_prog_test_run(fd, 1, &pkt_v4, sizeof(pkt_v4),
+ &flow_keys, &size, &retval, &duration);
+ CHECK(size != sizeof(flow_keys) || err || retval != 1,
+ "flow_dissector-bpf_skb_load_bytes",
+ "err %d errno %d retval %d duration %d size %u/%zu\n",
+ err, errno, retval, duration, size, sizeof(flow_keys));
+
+ if (fd >= 0)
+ close(fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c
new file mode 100644
index 000000000000..9807336a3016
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include <linux/nbd.h>
+
+void test_raw_tp_writable_reject_nbd_invalid(void)
+{
+ __u32 duration = 0;
+ char error[4096];
+ int bpf_fd = -1, tp_fd = -1;
+
+ const struct bpf_insn program[] = {
+ /* r6 is our tp buffer */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* one byte beyond the end of the nbd_request struct */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6,
+ sizeof(struct nbd_request)),
+ BPF_EXIT_INSN(),
+ };
+
+ struct bpf_load_program_attr load_attr = {
+ .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
+ .license = "GPL v2",
+ .insns = program,
+ .insns_cnt = sizeof(program) / sizeof(struct bpf_insn),
+ .log_level = 2,
+ };
+
+ bpf_fd = bpf_load_program_xattr(&load_attr, error, sizeof(error));
+ if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable load",
+ "failed: %d errno %d\n", bpf_fd, errno))
+ return;
+
+ tp_fd = bpf_raw_tracepoint_open("nbd_send_request", bpf_fd);
+ if (CHECK(tp_fd >= 0, "bpf_raw_tracepoint_writable open",
+ "erroneously succeeded\n"))
+ goto out_bpffd;
+
+ close(tp_fd);
+out_bpffd:
+ close(bpf_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
new file mode 100644
index 000000000000..5c45424cac5f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include <linux/nbd.h>
+
+void test_raw_tp_writable_test_run(void)
+{
+ __u32 duration = 0;
+ char error[4096];
+
+ const struct bpf_insn trace_program[] = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+
+ struct bpf_load_program_attr load_attr = {
+ .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
+ .license = "GPL v2",
+ .insns = trace_program,
+ .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn),
+ .log_level = 2,
+ };
+
+ int bpf_fd = bpf_load_program_xattr(&load_attr, error, sizeof(error));
+ if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable loaded",
+ "failed: %d errno %d\n", bpf_fd, errno))
+ return;
+
+ const struct bpf_insn skb_program[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+
+ struct bpf_load_program_attr skb_load_attr = {
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .license = "GPL v2",
+ .insns = skb_program,
+ .insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn),
+ };
+
+ int filter_fd =
+ bpf_load_program_xattr(&skb_load_attr, error, sizeof(error));
+ if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n",
+ filter_fd, errno))
+ goto out_bpffd;
+
+ int tp_fd = bpf_raw_tracepoint_open("bpf_test_finish", bpf_fd);
+ if (CHECK(tp_fd < 0, "bpf_raw_tracepoint_writable opened",
+ "failed: %d errno %d\n", tp_fd, errno))
+ goto out_filterfd;
+
+ char test_skb[128] = {
+ 0,
+ };
+
+ __u32 prog_ret;
+ int err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0,
+ 0, &prog_ret, 0);
+ CHECK(err != 42, "test_run",
+ "tracepoint did not modify return value\n");
+ CHECK(prog_ret != 0, "test_run_ret",
+ "socket_filter did not return 0\n");
+
+ close(tp_fd);
+
+ err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0, 0,
+ &prog_ret, 0);
+ CHECK(err != 0, "test_run_notrace",
+ "test_run failed with %d errno %d\n", err, errno);
+ CHECK(prog_ret != 0, "test_run_ret_notrace",
+ "socket_filter did not return 0\n");
+
+out_filterfd:
+ close(filter_fd);
+out_bpffd:
+ close(bpf_fd);
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index 75b17cada539..81ad9a0b29d0 100644
--- a/tools/testing/selftests/bpf/progs/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
@@ -64,6 +64,25 @@ struct bpf_map_def SEC("maps") jmp_table = {
.max_entries = 8
};
+struct bpf_map_def SEC("maps") last_dissection = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct bpf_flow_keys),
+ .max_entries = 1,
+};
+
+static __always_inline int export_flow_keys(struct bpf_flow_keys *keys,
+ int ret)
+{
+ struct bpf_flow_keys *val;
+ __u32 key = 0;
+
+ val = bpf_map_lookup_elem(&last_dissection, &key);
+ if (val)
+ memcpy(val, keys, sizeof(*val));
+ return ret;
+}
+
static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb,
__u16 hdr_size,
void *buffer)
@@ -109,10 +128,10 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
break;
default:
/* Protocol not supported */
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
}
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
}
SEC("flow_dissector")
@@ -139,8 +158,8 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
case IPPROTO_ICMP:
icmp = bpf_flow_dissect_get_header(skb, sizeof(*icmp), &_icmp);
if (!icmp)
- return BPF_DROP;
- return BPF_OK;
+ return export_flow_keys(keys, BPF_DROP);
+ return export_flow_keys(keys, BPF_OK);
case IPPROTO_IPIP:
keys->is_encap = true;
return parse_eth_proto(skb, bpf_htons(ETH_P_IP));
@@ -150,11 +169,11 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
case IPPROTO_GRE:
gre = bpf_flow_dissect_get_header(skb, sizeof(*gre), &_gre);
if (!gre)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
if (bpf_htons(gre->flags & GRE_VERSION))
/* Only inspect standard GRE packets with version 0 */
- return BPF_OK;
+ return export_flow_keys(keys, BPF_OK);
keys->thoff += sizeof(*gre); /* Step over GRE Flags and Proto */
if (GRE_IS_CSUM(gre->flags))
@@ -170,7 +189,7 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
eth = bpf_flow_dissect_get_header(skb, sizeof(*eth),
&_eth);
if (!eth)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
keys->thoff += sizeof(*eth);
@@ -181,31 +200,31 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
case IPPROTO_TCP:
tcp = bpf_flow_dissect_get_header(skb, sizeof(*tcp), &_tcp);
if (!tcp)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
if (tcp->doff < 5)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
if ((__u8 *)tcp + (tcp->doff << 2) > data_end)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
keys->sport = tcp->source;
keys->dport = tcp->dest;
- return BPF_OK;
+ return export_flow_keys(keys, BPF_OK);
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
udp = bpf_flow_dissect_get_header(skb, sizeof(*udp), &_udp);
if (!udp)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
keys->sport = udp->source;
keys->dport = udp->dest;
- return BPF_OK;
+ return export_flow_keys(keys, BPF_OK);
default:
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
}
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
}
static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr)
@@ -225,7 +244,7 @@ static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr)
return parse_ip_proto(skb, nexthdr);
}
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
}
PROG(IP)(struct __sk_buff *skb)
@@ -238,11 +257,11 @@ PROG(IP)(struct __sk_buff *skb)
iph = bpf_flow_dissect_get_header(skb, sizeof(*iph), &_iph);
if (!iph)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
/* IP header cannot be smaller than 20 bytes */
if (iph->ihl < 5)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
keys->addr_proto = ETH_P_IP;
keys->ipv4_src = iph->saddr;
@@ -250,7 +269,7 @@ PROG(IP)(struct __sk_buff *skb)
keys->thoff += iph->ihl << 2;
if (data + keys->thoff > data_end)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) {
keys->is_frag = true;
@@ -264,7 +283,7 @@ PROG(IP)(struct __sk_buff *skb)
}
if (done)
- return BPF_OK;
+ return export_flow_keys(keys, BPF_OK);
return parse_ip_proto(skb, iph->protocol);
}
@@ -276,7 +295,7 @@ PROG(IPV6)(struct __sk_buff *skb)
ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h);
if (!ip6h)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
keys->addr_proto = ETH_P_IPV6;
memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr));
@@ -288,11 +307,12 @@ PROG(IPV6)(struct __sk_buff *skb)
PROG(IPV6OP)(struct __sk_buff *skb)
{
+ struct bpf_flow_keys *keys = skb->flow_keys;
struct ipv6_opt_hdr *ip6h, _ip6h;
ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h);
if (!ip6h)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
/* hlen is in 8-octets and does not include the first 8 bytes
* of the header
@@ -309,7 +329,7 @@ PROG(IPV6FR)(struct __sk_buff *skb)
fragh = bpf_flow_dissect_get_header(skb, sizeof(*fragh), &_fragh);
if (!fragh)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
keys->thoff += sizeof(*fragh);
keys->is_frag = true;
@@ -321,13 +341,14 @@ PROG(IPV6FR)(struct __sk_buff *skb)
PROG(MPLS)(struct __sk_buff *skb)
{
+ struct bpf_flow_keys *keys = skb->flow_keys;
struct mpls_label *mpls, _mpls;
mpls = bpf_flow_dissect_get_header(skb, sizeof(*mpls), &_mpls);
if (!mpls)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
- return BPF_OK;
+ return export_flow_keys(keys, BPF_OK);
}
PROG(VLAN)(struct __sk_buff *skb)
@@ -339,10 +360,10 @@ PROG(VLAN)(struct __sk_buff *skb)
if (keys->n_proto == bpf_htons(ETH_P_8021AD)) {
vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
if (!vlan)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
keys->nhoff += sizeof(*vlan);
keys->thoff += sizeof(*vlan);
@@ -350,14 +371,14 @@ PROG(VLAN)(struct __sk_buff *skb)
vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
if (!vlan)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
keys->nhoff += sizeof(*vlan);
keys->thoff += sizeof(*vlan);
/* Only allow 8021AD + 8021Q double tagging and no triple tagging. */
if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
keys->n_proto = vlan->h_vlan_encapsulated_proto;
return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);
diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
index 37328f148538..1c39e4ccb7f1 100644
--- a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
@@ -55,6 +55,31 @@ struct bpf_map_def SEC("maps") linum_map = {
.max_entries = __NR_BPF_LINUM_ARRAY_IDX,
};
+struct bpf_spinlock_cnt {
+ struct bpf_spin_lock lock;
+ __u32 cnt;
+};
+
+struct bpf_map_def SEC("maps") sk_pkt_out_cnt = {
+ .type = BPF_MAP_TYPE_SK_STORAGE,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct bpf_spinlock_cnt),
+ .max_entries = 0,
+ .map_flags = BPF_F_NO_PREALLOC,
+};
+
+BPF_ANNOTATE_KV_PAIR(sk_pkt_out_cnt, int, struct bpf_spinlock_cnt);
+
+struct bpf_map_def SEC("maps") sk_pkt_out_cnt10 = {
+ .type = BPF_MAP_TYPE_SK_STORAGE,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct bpf_spinlock_cnt),
+ .max_entries = 0,
+ .map_flags = BPF_F_NO_PREALLOC,
+};
+
+BPF_ANNOTATE_KV_PAIR(sk_pkt_out_cnt10, int, struct bpf_spinlock_cnt);
+
static bool is_loopback6(__u32 *a6)
{
return !a6[0] && !a6[1] && !a6[2] && a6[3] == bpf_htonl(1);
@@ -120,7 +145,9 @@ static void tpcpy(struct bpf_tcp_sock *dst,
SEC("cgroup_skb/egress")
int egress_read_sock_fields(struct __sk_buff *skb)
{
+ struct bpf_spinlock_cnt cli_cnt_init = { .lock = 0, .cnt = 0xeB9F };
__u32 srv_idx = ADDR_SRV_IDX, cli_idx = ADDR_CLI_IDX, result_idx;
+ struct bpf_spinlock_cnt *pkt_out_cnt, *pkt_out_cnt10;
struct sockaddr_in6 *srv_sa6, *cli_sa6;
struct bpf_tcp_sock *tp, *tp_ret;
struct bpf_sock *sk, *sk_ret;
@@ -161,6 +188,32 @@ int egress_read_sock_fields(struct __sk_buff *skb)
skcpy(sk_ret, sk);
tpcpy(tp_ret, tp);
+ if (result_idx == EGRESS_SRV_IDX) {
+ /* Userspace has already created it for the srv sk */
+ pkt_out_cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, sk, 0, 0);
+ pkt_out_cnt10 = bpf_sk_storage_get(&sk_pkt_out_cnt10, sk,
+ 0, 0);
+ } else {
+ pkt_out_cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, sk,
+ &cli_cnt_init,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ pkt_out_cnt10 = bpf_sk_storage_get(&sk_pkt_out_cnt10,
+ sk, &cli_cnt_init,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ }
+
+ if (!pkt_out_cnt || !pkt_out_cnt10)
+ RETURN;
+
+ /* Even though both cnt and cnt10 have a lock defined in their BTF,
+ * one counter intentionally takes the lock while the other does not,
+ * as a test for the spinlock support in BPF_MAP_TYPE_SK_STORAGE.
+ */
+ pkt_out_cnt->cnt += 1;
+ bpf_spin_lock(&pkt_out_cnt10->lock);
+ pkt_out_cnt10->cnt += 10;
+ bpf_spin_unlock(&pkt_out_cnt10->lock);
+
RETURN;
}
diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
index ab56a6a72b7a..74370e7e286d 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
@@ -77,17 +77,52 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
struct v4hdr h_outer;
struct tcphdr tcph;
int olen, l2_len;
+ int tcp_off;
__u64 flags;
- if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
- sizeof(iph_inner)) < 0)
- return TC_ACT_OK;
+ /* Most tests encapsulate a packet into a tunnel with the same
+ * network protocol, and derive the outer header fields from
+ * the inner header.
+ *
+ * The 6in4 case tests different inner and outer protocols. As
+ * the inner is ipv6, but the outer expects an ipv4 header as
+ * input, manually build a struct iphdr based on the ipv6hdr.
+ */
+ if (encap_proto == IPPROTO_IPV6) {
+ const __u32 saddr = (192 << 24) | (168 << 16) | (1 << 8) | 1;
+ const __u32 daddr = (192 << 24) | (168 << 16) | (1 << 8) | 2;
+ struct ipv6hdr iph6_inner;
+
+ /* Read the IPv6 header */
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph6_inner,
+ sizeof(iph6_inner)) < 0)
+ return TC_ACT_OK;
+
+ /* Derive the IPv4 header fields from the IPv6 header */
+ memset(&iph_inner, 0, sizeof(iph_inner));
+ iph_inner.version = 4;
+ iph_inner.ihl = 5;
+ iph_inner.tot_len = bpf_htons(sizeof(iph6_inner) +
+ bpf_ntohs(iph6_inner.payload_len));
+ iph_inner.ttl = iph6_inner.hop_limit - 1;
+ iph_inner.protocol = iph6_inner.nexthdr;
+ iph_inner.saddr = __bpf_constant_htonl(saddr);
+ iph_inner.daddr = __bpf_constant_htonl(daddr);
+
+ tcp_off = sizeof(iph6_inner);
+ } else {
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
+ sizeof(iph_inner)) < 0)
+ return TC_ACT_OK;
+
+ tcp_off = sizeof(iph_inner);
+ }
/* filter only packets we want */
if (iph_inner.ihl != 5 || iph_inner.protocol != IPPROTO_TCP)
return TC_ACT_OK;
- if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(iph_inner),
+ if (bpf_skb_load_bytes(skb, ETH_HLEN + tcp_off,
&tcph, sizeof(tcph)) < 0)
return TC_ACT_OK;
@@ -129,6 +164,7 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
l2_len);
break;
case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
break;
default:
return TC_ACT_OK;
@@ -164,6 +200,17 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
BPF_F_INVALIDATE_HASH) < 0)
return TC_ACT_SHOT;
+ /* if changing outer proto type, update eth->h_proto */
+ if (encap_proto == IPPROTO_IPV6) {
+ struct ethhdr eth;
+
+ if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)) < 0)
+ return TC_ACT_SHOT;
+ eth.h_proto = bpf_htons(ETH_P_IP);
+ if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0) < 0)
+ return TC_ACT_SHOT;
+ }
+
return TC_ACT_OK;
}
@@ -325,6 +372,15 @@ int __encap_udp_eth(struct __sk_buff *skb)
return TC_ACT_OK;
}
+SEC("encap_sit_none")
+int __encap_sit_none(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+ return encap_ipv4(skb, IPPROTO_IPV6, ETH_P_IP);
+ else
+ return TC_ACT_OK;
+}
+
SEC("encap_ip6tnl_none")
int __encap_ip6tnl_none(struct __sk_buff *skb)
{
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index f8eb7987b794..42c1ce988945 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -24,6 +24,7 @@
#include "bpf_rlimit.h"
#include "bpf_util.h"
+#include "test_btf.h"
#define MAX_INSNS 512
#define MAX_SUBPROGS 16
@@ -58,68 +59,6 @@ static int __base_pr(enum libbpf_print_level level __attribute__((unused)),
return vfprintf(stderr, format, args);
}
-#define BTF_INFO_ENC(kind, kind_flag, vlen) \
- ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
-
-#define BTF_TYPE_ENC(name, info, size_or_type) \
- (name), (info), (size_or_type)
-
-#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
- ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
-#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
- BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
- BTF_INT_ENC(encoding, bits_offset, bits)
-
-#define BTF_FWD_ENC(name, kind_flag) \
- BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FWD, kind_flag, 0), 0)
-
-#define BTF_ARRAY_ENC(type, index_type, nr_elems) \
- (type), (index_type), (nr_elems)
-#define BTF_TYPE_ARRAY_ENC(type, index_type, nr_elems) \
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), \
- BTF_ARRAY_ENC(type, index_type, nr_elems)
-
-#define BTF_STRUCT_ENC(name, nr_elems, sz) \
- BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, nr_elems), sz)
-
-#define BTF_UNION_ENC(name, nr_elems, sz) \
- BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_UNION, 0, nr_elems), sz)
-
-#define BTF_VAR_ENC(name, type, linkage) \
- BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), type), (linkage)
-#define BTF_VAR_SECINFO_ENC(type, offset, size) \
- (type), (offset), (size)
-
-#define BTF_MEMBER_ENC(name, type, bits_offset) \
- (name), (type), (bits_offset)
-#define BTF_ENUM_ENC(name, val) (name), (val)
-#define BTF_MEMBER_OFFSET(bitfield_size, bits_offset) \
- ((bitfield_size) << 24 | (bits_offset))
-
-#define BTF_TYPEDEF_ENC(name, type) \
- BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), type)
-
-#define BTF_PTR_ENC(type) \
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), type)
-
-#define BTF_CONST_ENC(type) \
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), type)
-
-#define BTF_VOLATILE_ENC(type) \
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), type)
-
-#define BTF_RESTRICT_ENC(type) \
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), type)
-
-#define BTF_FUNC_PROTO_ENC(ret_type, nargs) \
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, nargs), ret_type)
-
-#define BTF_FUNC_PROTO_ARG_ENC(name, type) \
- (name), (type)
-
-#define BTF_FUNC_ENC(name, func_proto) \
- BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), func_proto)
-
#define BTF_END_RAW 0xdeadbeef
#define NAME_TBD 0xdeadb33f
diff --git a/tools/testing/selftests/bpf/test_btf.h b/tools/testing/selftests/bpf/test_btf.h
new file mode 100644
index 000000000000..2023725f1962
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_btf.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Facebook */
+
+#ifndef _TEST_BTF_H
+#define _TEST_BTF_H
+
+#define BTF_INFO_ENC(kind, kind_flag, vlen) \
+ ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
+
+#define BTF_TYPE_ENC(name, info, size_or_type) \
+ (name), (info), (size_or_type)
+
+#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
+ ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
+#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
+ BTF_INT_ENC(encoding, bits_offset, bits)
+
+#define BTF_FWD_ENC(name, kind_flag) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FWD, kind_flag, 0), 0)
+
+#define BTF_ARRAY_ENC(type, index_type, nr_elems) \
+ (type), (index_type), (nr_elems)
+#define BTF_TYPE_ARRAY_ENC(type, index_type, nr_elems) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), \
+ BTF_ARRAY_ENC(type, index_type, nr_elems)
+
+#define BTF_STRUCT_ENC(name, nr_elems, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, nr_elems), sz)
+
+#define BTF_UNION_ENC(name, nr_elems, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_UNION, 0, nr_elems), sz)
+
+#define BTF_VAR_ENC(name, type, linkage) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), type), (linkage)
+#define BTF_VAR_SECINFO_ENC(type, offset, size) \
+ (type), (offset), (size)
+
+#define BTF_MEMBER_ENC(name, type, bits_offset) \
+ (name), (type), (bits_offset)
+#define BTF_ENUM_ENC(name, val) (name), (val)
+#define BTF_MEMBER_OFFSET(bitfield_size, bits_offset) \
+ ((bitfield_size) << 24 | (bits_offset))
+
+#define BTF_TYPEDEF_ENC(name, type) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), type)
+
+#define BTF_PTR_ENC(type) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), type)
+
+#define BTF_CONST_ENC(type) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), type)
+
+#define BTF_VOLATILE_ENC(type) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), type)
+
+#define BTF_RESTRICT_ENC(type) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), type)
+
+#define BTF_FUNC_PROTO_ENC(ret_type, nargs) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, nargs), ret_type)
+
+#define BTF_FUNC_PROTO_ARG_ENC(name, type) \
+ (name), (type)
+
+#define BTF_FUNC_ENC(name, func_proto) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), func_proto)
+
+#endif /* _TEST_BTF_H */
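These macros compose raw BTF type and string sections the same way load_btf() in sk_storage_map.c above does. A minimal sketch encoding struct s { int a; } against the string section "\0s\0a" might be:

const char str_sec[] = "\0s\0a";
__u32 raw_types[] = {
	/* int */
	BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */
	/* struct s { int a; }; name_off 1 is "s" */
	BTF_STRUCT_ENC(1, 1, 4),			/* [2] */
	BTF_MEMBER_ENC(3, 1, 0),	/* int a; name_off 3 is "a" */
};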
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 3c627771f965..246f745cb006 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -27,6 +27,7 @@
#include "bpf_util.h"
#include "bpf_rlimit.h"
+#include "test_maps.h"
#ifndef ENOTSUPP
#define ENOTSUPP 524
@@ -36,15 +37,6 @@ static int skips;
static int map_flags;
-#define CHECK(condition, tag, format...) ({ \
- int __ret = !!(condition); \
- if (__ret) { \
- printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \
- printf(format); \
- exit(-1); \
- } \
-})
-
static void test_hashmap(unsigned int task, void *data)
{
long long key, next_key, first_key, value;
@@ -1703,6 +1695,10 @@ static void run_all_tests(void)
test_map_in_map();
}
+#define DECLARE
+#include <map_tests/tests.h>
+#undef DECLARE
+
int main(void)
{
srand(time(NULL));
@@ -1713,6 +1709,10 @@ int main(void)
map_flags = BPF_F_NO_PREALLOC;
run_all_tests();
+#define CALL
+#include <map_tests/tests.h>
+#undef CALL
+
printf("test_maps: OK, %d SKIPPED\n", skips);
return 0;
}
diff --git a/tools/testing/selftests/bpf/test_maps.h b/tools/testing/selftests/bpf/test_maps.h
new file mode 100644
index 000000000000..77d8587ac4ed
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_maps.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TEST_MAPS_H
+#define _TEST_MAPS_H
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#define CHECK(condition, tag, format...) ({ \
+ int __ret = !!(condition); \
+ if (__ret) { \
+ printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \
+ printf(format); \
+ exit(-1); \
+ } \
+})
+
+#endif
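The CHECK() macro moves here unchanged from test_maps.c so that files under map_tests/ get the same fail-fast reporting, e.g.:

CHECK(map_fd == -1, "bpf_create_map_xattr()", "errno:%d\n", errno);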
diff --git a/tools/testing/selftests/bpf/test_sock_fields.c b/tools/testing/selftests/bpf/test_sock_fields.c
index dcae7f664dce..e089477fa0a3 100644
--- a/tools/testing/selftests/bpf/test_sock_fields.c
+++ b/tools/testing/selftests/bpf/test_sock_fields.c
@@ -35,6 +35,11 @@ enum bpf_linum_array_idx {
__NR_BPF_LINUM_ARRAY_IDX,
};
+struct bpf_spinlock_cnt {
+ struct bpf_spin_lock lock;
+ __u32 cnt;
+};
+
#define CHECK(condition, tag, format...) ({ \
int __ret = !!(condition); \
if (__ret) { \
@@ -50,6 +55,8 @@ enum bpf_linum_array_idx {
#define DATA_LEN sizeof(DATA)
static struct sockaddr_in6 srv_sa6, cli_sa6;
+static int sk_pkt_out_cnt10_fd;
+static int sk_pkt_out_cnt_fd;
static int linum_map_fd;
static int addr_map_fd;
static int tp_map_fd;
@@ -220,28 +227,90 @@ static void check_result(void)
"Unexpected listen_tp", "Check listen_tp output. ingress_linum:%u",
ingress_linum);
- CHECK(srv_tp.data_segs_out != 1 ||
+ CHECK(srv_tp.data_segs_out != 2 ||
srv_tp.data_segs_in ||
srv_tp.snd_cwnd != 10 ||
srv_tp.total_retrans ||
- srv_tp.bytes_acked != DATA_LEN,
+ srv_tp.bytes_acked != 2 * DATA_LEN,
"Unexpected srv_tp", "Check srv_tp output. egress_linum:%u",
egress_linum);
CHECK(cli_tp.data_segs_out ||
- cli_tp.data_segs_in != 1 ||
+ cli_tp.data_segs_in != 2 ||
cli_tp.snd_cwnd != 10 ||
cli_tp.total_retrans ||
- cli_tp.bytes_received != DATA_LEN,
+ cli_tp.bytes_received != 2 * DATA_LEN,
"Unexpected cli_tp", "Check cli_tp output. egress_linum:%u",
egress_linum);
}
+static void check_sk_pkt_out_cnt(int accept_fd, int cli_fd)
+{
+ struct bpf_spinlock_cnt pkt_out_cnt = {}, pkt_out_cnt10 = {};
+ int err;
+
+ pkt_out_cnt.cnt = ~0;
+ pkt_out_cnt10.cnt = ~0;
+ err = bpf_map_lookup_elem(sk_pkt_out_cnt_fd, &accept_fd, &pkt_out_cnt);
+ if (!err)
+ err = bpf_map_lookup_elem(sk_pkt_out_cnt10_fd, &accept_fd,
+ &pkt_out_cnt10);
+
+ /* The bpf prog only counts for fullsock, and the passive
+ * connection does not become a fullsock until the 3WHS has
+ * finished.
+ * The bpf prog counted only two outgoing data packets, but we
+ * deliberately initialized accept_fd's pkt_out_cnt to 2 in
+ * init_sk_storage(). Hence, 4 here.
+ */
+ CHECK(err || pkt_out_cnt.cnt != 4 || pkt_out_cnt10.cnt != 40,
+ "bpf_map_lookup_elem(sk_pkt_out_cnt, &accept_fd)",
+ "err:%d errno:%d pkt_out_cnt:%u pkt_out_cnt10:%u",
+ err, errno, pkt_out_cnt.cnt, pkt_out_cnt10.cnt);
+
+ pkt_out_cnt.cnt = ~0;
+ pkt_out_cnt10.cnt = ~0;
+ err = bpf_map_lookup_elem(sk_pkt_out_cnt_fd, &cli_fd, &pkt_out_cnt);
+ if (!err)
+ err = bpf_map_lookup_elem(sk_pkt_out_cnt10_fd, &cli_fd,
+ &pkt_out_cnt10);
+ /* Active connection is fullsock from the beginning.
+ * 1 SYN and 1 ACK during 3WHS
+ * 2 ACKs on the data packets.
+ *
+ * The bpf_prog initialized it to 0xeB9F.
+ */
+ CHECK(err || pkt_out_cnt.cnt != 0xeB9F + 4 ||
+ pkt_out_cnt10.cnt != 0xeB9F + 40,
+ "bpf_map_lookup_elem(sk_pkt_out_cnt, &cli_fd)",
+ "err:%d errno:%d pkt_out_cnt:%u pkt_out_cnt10:%u",
+ err, errno, pkt_out_cnt.cnt, pkt_out_cnt10.cnt);
+}
+
+static void init_sk_storage(int sk_fd, __u32 pkt_out_cnt)
+{
+ struct bpf_spinlock_cnt scnt = {};
+ int err;
+
+ scnt.cnt = pkt_out_cnt;
+ err = bpf_map_update_elem(sk_pkt_out_cnt_fd, &sk_fd, &scnt,
+ BPF_NOEXIST);
+ CHECK(err, "bpf_map_update_elem(sk_pkt_out_cnt_fd)",
+ "err:%d errno:%d", err, errno);
+
+ scnt.cnt *= 10;
+ err = bpf_map_update_elem(sk_pkt_out_cnt10_fd, &sk_fd, &scnt,
+ BPF_NOEXIST);
+ CHECK(err, "bpf_map_update_elem(sk_pkt_out_cnt10_fd)",
+ "err:%d errno:%d", err, errno);
+}
+
static void test(void)
{
int listen_fd, cli_fd, accept_fd, epfd, err;
struct epoll_event ev;
socklen_t addrlen;
+ int i;
addrlen = sizeof(struct sockaddr_in6);
ev.events = EPOLLIN;
@@ -308,24 +377,30 @@ static void test(void)
accept_fd, errno);
close(listen_fd);
- /* Send some data from accept_fd to cli_fd */
- err = send(accept_fd, DATA, DATA_LEN, 0);
- CHECK(err != DATA_LEN, "send(accept_fd)", "err:%d errno:%d",
- err, errno);
-
- /* Have some timeout in recv(cli_fd). Just in case. */
ev.data.fd = cli_fd;
err = epoll_ctl(epfd, EPOLL_CTL_ADD, cli_fd, &ev);
CHECK(err, "epoll_ctl(EPOLL_CTL_ADD, cli_fd)", "err:%d errno:%d",
err, errno);
- err = epoll_wait(epfd, &ev, 1, 1000);
- CHECK(err != 1 || ev.data.fd != cli_fd,
- "epoll_wait(cli_fd)", "err:%d errno:%d ev.data.fd:%d cli_fd:%d",
- err, errno, ev.data.fd, cli_fd);
+ init_sk_storage(accept_fd, 2);
- err = recv(cli_fd, NULL, 0, MSG_TRUNC);
- CHECK(err, "recv(cli_fd)", "err:%d errno:%d", err, errno);
+ for (i = 0; i < 2; i++) {
+ /* Send some data from accept_fd to cli_fd */
+ err = send(accept_fd, DATA, DATA_LEN, 0);
+ CHECK(err != DATA_LEN, "send(accept_fd)", "err:%d errno:%d",
+ err, errno);
+
+ /* Have some timeout in recv(cli_fd). Just in case. */
+ err = epoll_wait(epfd, &ev, 1, 1000);
+ CHECK(err != 1 || ev.data.fd != cli_fd,
+ "epoll_wait(cli_fd)", "err:%d errno:%d ev.data.fd:%d cli_fd:%d",
+ err, errno, ev.data.fd, cli_fd);
+
+ err = recv(cli_fd, NULL, 0, MSG_TRUNC);
+ CHECK(err, "recv(cli_fd)", "err:%d errno:%d", err, errno);
+ }
+
+ check_sk_pkt_out_cnt(accept_fd, cli_fd);
close(epfd);
close(accept_fd);
@@ -395,6 +470,14 @@ int main(int argc, char **argv)
CHECK(!map, "cannot find linum_map", "(null)");
linum_map_fd = bpf_map__fd(map);
+ map = bpf_object__find_map_by_name(obj, "sk_pkt_out_cnt");
+ CHECK(!map, "cannot find sk_pkt_out_cnt", "(null)");
+ sk_pkt_out_cnt_fd = bpf_map__fd(map);
+
+ map = bpf_object__find_map_by_name(obj, "sk_pkt_out_cnt10");
+ CHECK(!map, "cannot find sk_pkt_out_cnt10", "(null)");
+ sk_pkt_out_cnt10_fd = bpf_map__fd(map);
+
test();
bpf_object__close(obj);
diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
index d4d8d5d3b06e..ff0d31d38061 100755
--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
@@ -97,6 +97,9 @@ if [[ "$#" -eq "0" ]]; then
echo "ip6ip6"
$0 ipv6 ip6tnl none 100
+ echo "sit"
+ $0 ipv6 sit none 100
+
for mac in none mpls eth ; do
echo "ip gre $mac"
$0 ipv4 gre $mac 100
@@ -211,11 +214,20 @@ else
targs=""
fi
+# for sit (IPv6-in-IPv4), the tunnel (outer) address family differs from the inner one
+if [[ "${tuntype}" == "sit" ]]; then
+ link_addr1="${ns1_v4}"
+ link_addr2="${ns2_v4}"
+else
+ link_addr1="${addr1}"
+ link_addr2="${addr2}"
+fi
+
# serverside, insert decap module
# server is still running
# client can connect again
ip netns exec "${ns2}" ip link add name testtun0 type "${ttype}" \
- ${tmode} remote "${addr1}" local "${addr2}" $targs
+ ${tmode} remote "${link_addr1}" local "${link_addr2}" $targs
expect_tun_fail=0
@@ -260,6 +272,12 @@ else
server_listen
fi
+# bpf_skb_net_shrink does not take tunnel flags yet, so it cannot update
+# the L3 header; skip the BPF decap step for sit.
+if [[ "${tuntype}" == "sit" ]]; then
+ echo OK
+ exit 0
+fi
+
# serverside, use BPF for decap
ip netns exec "${ns2}" ip link del dev testtun0
ip netns exec "${ns2}" tc qdisc add dev veth2 clsact
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index ed9e894afef3..ccd896b98cac 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -47,12 +47,13 @@
#include "bpf_rlimit.h"
#include "bpf_rand.h"
#include "bpf_util.h"
+#include "test_btf.h"
#include "../../../include/linux/filter.h"
#define MAX_INSNS BPF_MAXINSNS
#define MAX_TEST_INSNS 1000000
#define MAX_FIXUPS 8
-#define MAX_NR_MAPS 17
+#define MAX_NR_MAPS 18
#define MAX_TEST_RUNS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
@@ -85,6 +86,7 @@ struct bpf_test {
int fixup_map_array_ro[MAX_FIXUPS];
int fixup_map_array_wo[MAX_FIXUPS];
int fixup_map_array_small[MAX_FIXUPS];
+ int fixup_sk_storage_map[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
uint32_t retval, retval_unpriv, insn_processed;
@@ -497,24 +499,6 @@ static int create_cgroup_storage(bool percpu)
return fd;
}
-#define BTF_INFO_ENC(kind, kind_flag, vlen) \
- ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
-#define BTF_TYPE_ENC(name, info, size_or_type) \
- (name), (info), (size_or_type)
-#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
- ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
-#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
- BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
- BTF_INT_ENC(encoding, bits_offset, bits)
-#define BTF_MEMBER_ENC(name, type, bits_offset) \
- (name), (type), (bits_offset)
-
-struct btf_raw_data {
- __u32 raw_types[64];
- const char *str_sec;
- __u32 str_sec_size;
-};
-
/* struct bpf_spin_lock {
* int val;
* };
@@ -589,6 +573,31 @@ static int create_map_spin_lock(void)
return fd;
}
+static int create_sk_storage_map(void)
+{
+ struct bpf_create_map_attr attr = {
+ .name = "test_map",
+ .map_type = BPF_MAP_TYPE_SK_STORAGE,
+ .key_size = 4,
+ .value_size = 8,
+ .max_entries = 0,
+ .map_flags = BPF_F_NO_PREALLOC,
+ .btf_key_type_id = 1,
+ .btf_value_type_id = 3,
+ };
+ int fd, btf_fd;
+
+ btf_fd = load_btf();
+ if (btf_fd < 0)
+ return -1;
+ attr.btf_fd = btf_fd;
+ fd = bpf_create_map_xattr(&attr);
+ close(attr.btf_fd);
+ if (fd < 0)
+ printf("Failed to create sk_storage_map\n");
+ return fd;
+}
+
static char bpf_vlog[UINT_MAX >> 8];
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
@@ -611,6 +620,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
int *fixup_map_array_ro = test->fixup_map_array_ro;
int *fixup_map_array_wo = test->fixup_map_array_wo;
int *fixup_map_array_small = test->fixup_map_array_small;
+ int *fixup_sk_storage_map = test->fixup_sk_storage_map;
if (test->fill_helper) {
test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
@@ -765,6 +775,13 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
fixup_map_array_small++;
} while (*fixup_map_array_small);
}
+ if (*fixup_sk_storage_map) {
+ map_fds[17] = create_sk_storage_map();
+ do {
+ prog[*fixup_sk_storage_map].imm = map_fds[17];
+ fixup_sk_storage_map++;
+ } while (*fixup_sk_storage_map);
+ }
}
static int set_admin(bool admin)
diff --git a/tools/testing/selftests/bpf/verifier/raw_tp_writable.c b/tools/testing/selftests/bpf/verifier/raw_tp_writable.c
new file mode 100644
index 000000000000..95b5d70a1dc1
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/raw_tp_writable.c
@@ -0,0 +1,34 @@
+{
+ "raw_tracepoint_writable: reject variable offset",
+ .insns = {
+ /* r6 is our tp buffer */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ /* move the key (== 0) to r10-8 */
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ /* lookup in the map */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+
+ /* exit clean if null */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* shift the buffer pointer to a variable location */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_0),
+ /* clobber whatever's there */
+ BPF_MOV64_IMM(BPF_REG_7, 4242),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, 0),
+
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1, },
+ .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
+ .errstr = "R6 invalid variable buffer offset: off=0, var_off=(0x0; 0xffffffff)",
+},
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
index 416436231fab..b31cd2cf50d0 100644
--- a/tools/testing/selftests/bpf/verifier/sock.c
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -382,3 +382,119 @@
.result = REJECT,
.errstr = "reference has not been acquired before",
},
+{
+ "sk_storage_get(map, skb->sk, NULL, 0): value == NULL",
+ .insns = {
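+	/* bpf_sk_storage_get(map=r1, sk=r2, value=r3, flags=r4);
+	 * passing value == NULL with flags == 0 is allowed.
+	 */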
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_sk_storage_map = { 11 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "sk_storage_get(map, skb->sk, 1, 1): value == 1",
+ .insns = {
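+	/* value == 1 makes R3 a scalar, but the helper expects a
+	 * pointer to stack memory.
+	 */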
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_sk_storage_map = { 11 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "R3 type=inv expected=fp",
+},
+{
+ "sk_storage_get(map, skb->sk, &stack_value, 1): stack_value",
+ .insns = {
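+	/* zero-init the 8-byte value buffer at fp-8 before handing
+	 * it to the helper.
+	 */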
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_sk_storage_map = { 14 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "sk_storage_get(map, skb->sk, &stack_value, 1): partially init stack_value",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),
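+	/* the BPF_W store only initializes 4 of the value's 8 bytes,
+	 * so the helper's indirect stack read must be rejected
+	 */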
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_sk_storage_map = { 14 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid indirect read from stack",
+},
+{
+ "bpf_map_lookup_elem(smap, &key)",
+ .insns = {
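+	/* the generic bpf_map_lookup_elem() helper may not be used
+	 * on a sk_storage map
+	 */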
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_sk_storage_map = { 3 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "cannot pass map_type 24 into func bpf_map_lookup_elem",
+},