Diffstat (limited to 'tools/testing/selftests/bpf')
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST.s390x                    |   2
-rw-r--r--  tools/testing/selftests/bpf/config                            |   2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/attach_probe.c         |   6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/autoattach.c           |  30
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_cookie.c           |   2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_nf.c               |  60
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/dynptr.c               |   3
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kfunc_call.c           |  36
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/task_pt_regs.c         |   2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/time_tai.c             |  74
-rw-r--r--  tools/testing/selftests/bpf/progs/dynptr_fail.c               |  94
-rw-r--r--  tools/testing/selftests/bpf/progs/kfunc_call_destructive.c    |  14
-rw-r--r--  tools/testing/selftests/bpf/progs/lsm.c                       |   3
-rw-r--r--  tools/testing/selftests/bpf/progs/test_autoattach.c           |  23
-rw-r--r--  tools/testing/selftests/bpf/progs/test_bpf_cookie.c           |   4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_bpf_nf.c               |  21
-rw-r--r--  tools/testing/selftests/bpf/progs/test_helper_restricted.c    |   4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_time_tai.c             |  24
-rwxr-xr-x  tools/testing/selftests/bpf/vmtest.sh                         |  34
-rw-r--r--  tools/testing/selftests/bpf/xskxceiver.c                      | 166
-rw-r--r--  tools/testing/selftests/bpf/xskxceiver.h                      |   8
21 files changed, 514 insertions(+), 98 deletions(-)
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index e33cab34d22f..9d8de15e725e 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -43,7 +43,7 @@ test_bpffs # bpffs test failed 255
test_bprm_opts # failed to auto-attach program 'secure_exec': -524 (trampoline)
test_ima # failed to auto-attach program 'ima': -524 (trampoline)
test_local_storage # failed to auto-attach program 'unlink_hook': -524 (trampoline)
-test_lsm # failed to find kernel BTF type ID of '__x64_sys_setdomainname': -3 (?)
+test_lsm # attach unexpected error: -524 (trampoline)
test_overhead # attach_fentry unexpected error: -524 (trampoline)
test_profiler # unknown func bpf_probe_read_str#45 (overlapping)
timer # failed to auto-attach program 'test1': -524 (trampoline)
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index fabf0c014349..3fc46f9cfb22 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -50,9 +50,11 @@ CONFIG_NET_SCHED=y
CONFIG_NETDEVSIM=m
CONFIG_NETFILTER=y
CONFIG_NETFILTER_SYNPROXY=y
+CONFIG_NETFILTER_XT_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_TARGET_CT=y
CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_MARK=y
CONFIG_NF_DEFRAG_IPV4=y
CONFIG_NF_DEFRAG_IPV6=y
CONFIG_RC_CORE=y
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index 0b899d2d8ea7..9566d9d2f6ee 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -6,19 +6,19 @@
volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute((section(".probes")));
/* uprobe attach point */
-static void trigger_func(void)
+static noinline void trigger_func(void)
{
asm volatile ("");
}
/* attach point for byname uprobe */
-static void trigger_func2(void)
+static noinline void trigger_func2(void)
{
asm volatile ("");
}
/* attach point for byname sleepable uprobe */
-static void trigger_func3(void)
+static noinline void trigger_func3(void)
{
asm volatile ("");
}
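The noinline annotations above keep the compiler from inlining the empty trigger functions into their callers, so each one survives as a distinct symbol the uprobe can attach to. A minimal stand-alone illustration of the same pattern (plain GCC/clang C, independent of the selftest harness):
#include <stdio.h>
/* uprobe attach point: must stay out-of-line so its symbol and address
 * exist in the final binary; the empty asm is a barrier that keeps the
 * compiler from discarding the call entirely
 */
static __attribute__((noinline)) void uprobe_trigger(void)
{
	asm volatile ("");
}
int main(void)
{
	uprobe_trigger();   /* a uprobe attached to uprobe_trigger fires here */
	puts("triggered");
	return 0;
}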
diff --git a/tools/testing/selftests/bpf/prog_tests/autoattach.c b/tools/testing/selftests/bpf/prog_tests/autoattach.c
new file mode 100644
index 000000000000..dc5e01d279bd
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/autoattach.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Google */
+
+#include <test_progs.h>
+#include "test_autoattach.skel.h"
+
+void test_autoattach(void)
+{
+ struct test_autoattach *skel;
+
+ skel = test_autoattach__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ goto cleanup;
+
+ /* disable auto-attach for prog2 */
+ bpf_program__set_autoattach(skel->progs.prog2, false);
+ ASSERT_TRUE(bpf_program__autoattach(skel->progs.prog1), "autoattach_prog1");
+ ASSERT_FALSE(bpf_program__autoattach(skel->progs.prog2), "autoattach_prog2");
+ if (!ASSERT_OK(test_autoattach__attach(skel), "skel_attach"))
+ goto cleanup;
+
+ usleep(1);
+
+ ASSERT_TRUE(skel->bss->prog1_called, "attached_prog1");
+ ASSERT_FALSE(skel->bss->prog2_called, "attached_prog2");
+
+cleanup:
+ test_autoattach__destroy(skel);
+}
+
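The test above exercises the new libbpf auto-attach toggle through a generated skeleton. For reference, a sketch of the same toggles on a plain bpf_object (assumes libbpf 1.0+; the object file path and program name here are hypothetical):
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <bpf/libbpf.h>
int load_with_selective_autoattach(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int err;
	/* hypothetical object file */
	obj = bpf_object__open_file("test_autoattach.bpf.o", NULL);
	if (!obj)
		return -1;
	bpf_object__for_each_program(prog, obj) {
		/* keep auto-attach only for "prog1"; a skeleton's __attach()
		 * honors this flag, while with a plain object the caller
		 * checks bpf_program__autoattach() before bpf_program__attach()
		 */
		if (strcmp(bpf_program__name(prog), "prog1") != 0)
			bpf_program__set_autoattach(prog, false);
	}
	err = bpf_object__load(obj);
	if (err)
		fprintf(stderr, "load failed: %d\n", err);
	bpf_object__close(obj);
	return err;
}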
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
index 2974b44f80fa..2be2d61954bc 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -13,7 +13,7 @@
#include "kprobe_multi.skel.h"
/* uprobe attach point */
-static void trigger_func(void)
+static noinline void trigger_func(void)
{
asm volatile ("");
}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
index 7a74a1579076..544bf90ac2a7 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
@@ -24,10 +24,34 @@ enum {
TEST_TC_BPF,
};
+#define TIMEOUT_MS 3000
+
+static int connect_to_server(int srv_fd)
+{
+ int fd = -1;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ if (!ASSERT_GE(fd, 0, "socket"))
+ goto out;
+
+ if (!ASSERT_EQ(connect_fd_to_fd(fd, srv_fd, TIMEOUT_MS), 0, "connect_fd_to_fd")) {
+ close(fd);
+ fd = -1;
+ }
+out:
+ return fd;
+}
+
static void test_bpf_nf_ct(int mode)
{
+ const char *iptables = "iptables -t raw %s PREROUTING -j CONNMARK --set-mark 42/0";
+ int srv_fd = -1, client_fd = -1, srv_client_fd = -1;
+ struct sockaddr_in peer_addr = {};
struct test_bpf_nf *skel;
int prog_fd, err;
+ socklen_t len;
+ u16 srv_port;
+ char cmd[64];
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
@@ -38,6 +62,32 @@ static void test_bpf_nf_ct(int mode)
if (!ASSERT_OK_PTR(skel, "test_bpf_nf__open_and_load"))
return;
+ /* Enable connection tracking */
+ snprintf(cmd, sizeof(cmd), iptables, "-A");
+ if (!ASSERT_OK(system(cmd), "iptables"))
+ goto end;
+
+ srv_port = (mode == TEST_XDP) ? 5005 : 5006;
+ srv_fd = start_server(AF_INET, SOCK_STREAM, "127.0.0.1", srv_port, TIMEOUT_MS);
+ if (!ASSERT_GE(srv_fd, 0, "start_server"))
+ goto end;
+
+ client_fd = connect_to_server(srv_fd);
+ if (!ASSERT_GE(client_fd, 0, "connect_to_server"))
+ goto end;
+
+ len = sizeof(peer_addr);
+ srv_client_fd = accept(srv_fd, (struct sockaddr *)&peer_addr, &len);
+ if (!ASSERT_GE(srv_client_fd, 0, "accept"))
+ goto end;
+ if (!ASSERT_EQ(len, sizeof(struct sockaddr_in), "sockaddr len"))
+ goto end;
+
+ skel->bss->saddr = peer_addr.sin_addr.s_addr;
+ skel->bss->sport = peer_addr.sin_port;
+ skel->bss->daddr = peer_addr.sin_addr.s_addr;
+ skel->bss->dport = htons(srv_port);
+
if (mode == TEST_XDP)
prog_fd = bpf_program__fd(skel->progs.nf_xdp_ct_test);
else
@@ -63,7 +113,17 @@ static void test_bpf_nf_ct(int mode)
ASSERT_LE(skel->bss->test_delta_timeout, 10, "Test for max ct timeout update");
/* expected status is IPS_SEEN_REPLY */
ASSERT_EQ(skel->bss->test_status, 2, "Test for ct status update ");
+ ASSERT_EQ(skel->data->test_exist_lookup, 0, "Test existing connection lookup");
+ ASSERT_EQ(skel->bss->test_exist_lookup_mark, 43, "Test existing connection lookup ctmark");
end:
+ if (srv_client_fd != -1)
+ close(srv_client_fd);
+ if (client_fd != -1)
+ close(client_fd);
+ if (srv_fd != -1)
+ close(srv_fd);
+ snprintf(cmd, sizeof(cmd), iptables, "-D");
+ system(cmd);
test_bpf_nf__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c
index 3c7aa82b98e2..bcf80b9f7c27 100644
--- a/tools/testing/selftests/bpf/prog_tests/dynptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c
@@ -22,7 +22,8 @@ static struct {
{"add_dynptr_to_map2", "invalid indirect read from stack"},
{"data_slice_out_of_bounds_ringbuf", "value is outside of the allowed memory range"},
{"data_slice_out_of_bounds_map_value", "value is outside of the allowed memory range"},
- {"data_slice_use_after_release", "invalid mem access 'scalar'"},
+ {"data_slice_use_after_release1", "invalid mem access 'scalar'"},
+ {"data_slice_use_after_release2", "invalid mem access 'scalar'"},
{"data_slice_missing_null_check1", "invalid mem access 'mem_or_null'"},
{"data_slice_missing_null_check2", "invalid mem access 'mem_or_null'"},
{"invalid_helper1", "invalid indirect read from stack"},
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
index c00eb974eb85..351fafa006fb 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
@@ -5,6 +5,9 @@
#include "kfunc_call_test.lskel.h"
#include "kfunc_call_test_subprog.skel.h"
#include "kfunc_call_test_subprog.lskel.h"
+#include "kfunc_call_destructive.skel.h"
+
+#include "cap_helpers.h"
static void test_main(void)
{
@@ -86,6 +89,36 @@ static void test_subprog_lskel(void)
kfunc_call_test_subprog_lskel__destroy(skel);
}
+static int test_destructive_open_and_load(void)
+{
+ struct kfunc_call_destructive *skel;
+ int err;
+
+ skel = kfunc_call_destructive__open();
+ if (!ASSERT_OK_PTR(skel, "prog_open"))
+ return -1;
+
+ err = kfunc_call_destructive__load(skel);
+
+ kfunc_call_destructive__destroy(skel);
+
+ return err;
+}
+
+static void test_destructive(void)
+{
+ __u64 save_caps = 0;
+
+ ASSERT_OK(test_destructive_open_and_load(), "successful_load");
+
+ if (!ASSERT_OK(cap_disable_effective(1ULL << CAP_SYS_BOOT, &save_caps), "drop_caps"))
+ return;
+
+ ASSERT_EQ(test_destructive_open_and_load(), -13, "no_caps_failure");
+
+ cap_enable_effective(save_caps, NULL);
+}
+
void test_kfunc_call(void)
{
if (test__start_subtest("main"))
@@ -96,4 +129,7 @@ void test_kfunc_call(void)
if (test__start_subtest("subprog_lskel"))
test_subprog_lskel();
+
+ if (test__start_subtest("destructive"))
+ test_destructive();
}
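test_destructive() relies on the selftests' cap_helpers.h to drop CAP_SYS_BOOT and confirm that loading a program calling a destructive kfunc is then rejected with -EACCES (-13). A sketch of that drop-and-restore pattern in isolation (assumes the cap_helpers.h API used above; the helper name and op callback are hypothetical):
#include <errno.h>
#include <linux/capability.h>
#include "cap_helpers.h"   /* selftests-local wrapper around capget()/capset() */
/* hypothetical helper: run op() with CAP_SYS_BOOT dropped and expect -EACCES */
static int expect_eacces_without_cap_sys_boot(int (*op)(void))
{
	__u64 saved_caps = 0;
	int ret;
	if (cap_disable_effective(1ULL << CAP_SYS_BOOT, &saved_caps))
		return -1;             /* could not drop the capability */
	ret = op();                    /* should now be rejected with -EACCES */
	cap_enable_effective(saved_caps, NULL);
	return ret == -EACCES ? 0 : -1;
}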
diff --git a/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c
index 61935e7e056a..f000734a3d1f 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c
@@ -4,7 +4,7 @@
#include "test_task_pt_regs.skel.h"
/* uprobe attach point */
-static void trigger_func(void)
+static noinline void trigger_func(void)
{
asm volatile ("");
}
diff --git a/tools/testing/selftests/bpf/prog_tests/time_tai.c b/tools/testing/selftests/bpf/prog_tests/time_tai.c
new file mode 100644
index 000000000000..a31119823666
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/time_tai.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Linutronix GmbH */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "test_time_tai.skel.h"
+
+#include <time.h>
+#include <stdint.h>
+
+#define TAI_THRESHOLD 1000000000ULL /* 1s */
+#define NSEC_PER_SEC 1000000000ULL
+
+static __u64 ts_to_ns(const struct timespec *ts)
+{
+ return ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;
+}
+
+void test_time_tai(void)
+{
+ struct __sk_buff skb = {
+ .cb[0] = 0,
+ .cb[1] = 0,
+ .tstamp = 0,
+ };
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ .ctx_out = &skb,
+ .ctx_size_out = sizeof(skb),
+ );
+ struct test_time_tai *skel;
+ struct timespec now_tai;
+ __u64 ts1, ts2, now;
+ int ret, prog_fd;
+
+ /* Open and load */
+ skel = test_time_tai__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "tai_open"))
+ return;
+
+ /* Run test program */
+ prog_fd = bpf_program__fd(skel->progs.time_tai);
+ ret = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(ret, "test_run");
+
+ /* Retrieve generated TAI timestamps */
+ ts1 = skb.tstamp;
+ ts2 = skb.cb[0] | ((__u64)skb.cb[1] << 32);
+
+ /* TAI != 0 */
+ ASSERT_NEQ(ts1, 0, "tai_ts1");
+ ASSERT_NEQ(ts2, 0, "tai_ts2");
+
+ /* TAI is moving forward only */
+ ASSERT_GT(ts2, ts1, "tai_forward");
+
+ /* Check for future */
+ ret = clock_gettime(CLOCK_TAI, &now_tai);
+ ASSERT_EQ(ret, 0, "tai_gettime");
+ now = ts_to_ns(&now_tai);
+
+ ASSERT_TRUE(now > ts1, "tai_future_ts1");
+ ASSERT_TRUE(now > ts2, "tai_future_ts2");
+
+ /* Check for reasonable range */
+ ASSERT_TRUE(now - ts1 < TAI_THRESHOLD, "tai_range_ts1");
+ ASSERT_TRUE(now - ts2 < TAI_THRESHOLD, "tai_range_ts2");
+
+ test_time_tai__destroy(skel);
+}
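The test only checks that the BPF-side TAI timestamps are non-zero, monotonic, and close to the user-space CLOCK_TAI reading; it does not assume a particular TAI-UTC offset. A plain POSIX sketch of how the two clocks relate (independent of BPF): where no TAI offset has been configured, e.g. via adjtimex(), CLOCK_TAI reads the same as CLOCK_REALTIME, while a configured system shows TAI ahead of UTC by the leap-second count (37 s as of 2022).
#define _GNU_SOURCE          /* for CLOCK_TAI on older glibc */
#include <stdio.h>
#include <time.h>
int main(void)
{
	struct timespec tai, real;
	clock_gettime(CLOCK_TAI, &tai);       /* international atomic time */
	clock_gettime(CLOCK_REALTIME, &real); /* UTC wall clock */
	/* 0 if the kernel's TAI offset is unset, 37 on a configured system (2022) */
	printf("TAI - REALTIME = %lld s\n",
	       (long long)(tai.tv_sec - real.tv_sec));
	return 0;
}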
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index 0a26c243e6e9..b0f08ff024fb 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -65,7 +65,7 @@ static int get_map_val_dynptr(struct bpf_dynptr *ptr)
/* Every bpf_ringbuf_reserve_dynptr call must have a corresponding
* bpf_ringbuf_submit/discard_dynptr call
*/
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int ringbuf_missing_release1(void *ctx)
{
struct bpf_dynptr ptr;
@@ -77,7 +77,7 @@ int ringbuf_missing_release1(void *ctx)
return 0;
}
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int ringbuf_missing_release2(void *ctx)
{
struct bpf_dynptr ptr1, ptr2;
@@ -112,7 +112,7 @@ static int missing_release_callback_fn(__u32 index, void *data)
}
/* Any dynptr initialized within a callback must have bpf_dynptr_put called */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int ringbuf_missing_release_callback(void *ctx)
{
bpf_loop(10, missing_release_callback_fn, NULL, 0);
@@ -120,7 +120,7 @@ int ringbuf_missing_release_callback(void *ctx)
}
/* Can't call bpf_ringbuf_submit/discard_dynptr on a non-initialized dynptr */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int ringbuf_release_uninit_dynptr(void *ctx)
{
struct bpf_dynptr ptr;
@@ -132,7 +132,7 @@ int ringbuf_release_uninit_dynptr(void *ctx)
}
/* A dynptr can't be used after it has been invalidated */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int use_after_invalid(void *ctx)
{
struct bpf_dynptr ptr;
@@ -151,7 +151,7 @@ int use_after_invalid(void *ctx)
}
/* Can't call non-dynptr ringbuf APIs on a dynptr ringbuf sample */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int ringbuf_invalid_api(void *ctx)
{
struct bpf_dynptr ptr;
@@ -173,7 +173,7 @@ done:
}
/* Can't add a dynptr to a map */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int add_dynptr_to_map1(void *ctx)
{
struct bpf_dynptr ptr;
@@ -190,7 +190,7 @@ int add_dynptr_to_map1(void *ctx)
}
/* Can't add a struct with an embedded dynptr to a map */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int add_dynptr_to_map2(void *ctx)
{
struct test_info x;
@@ -207,7 +207,7 @@ int add_dynptr_to_map2(void *ctx)
}
/* A data slice can't be accessed out of bounds */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int data_slice_out_of_bounds_ringbuf(void *ctx)
{
struct bpf_dynptr ptr;
@@ -227,7 +227,7 @@ done:
return 0;
}
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int data_slice_out_of_bounds_map_value(void *ctx)
{
__u32 key = 0, map_val;
@@ -247,8 +247,8 @@ int data_slice_out_of_bounds_map_value(void *ctx)
}
/* A data slice can't be used after it has been released */
-SEC("?raw_tp/sys_nanosleep")
-int data_slice_use_after_release(void *ctx)
+SEC("?raw_tp")
+int data_slice_use_after_release1(void *ctx)
{
struct bpf_dynptr ptr;
struct sample *sample;
@@ -272,8 +272,44 @@ done:
return 0;
}
+/* A data slice can't be used after it has been released.
+ *
+ * This tests the case where the data slice tracks a dynptr (ptr2)
+ * that is at a non-zero offset from the frame pointer (ptr1 is at fp,
+ * ptr2 is at fp - 16).
+ */
+SEC("?raw_tp")
+int data_slice_use_after_release2(void *ctx)
+{
+ struct bpf_dynptr ptr1, ptr2;
+ struct sample *sample;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr1);
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr2);
+
+ sample = bpf_dynptr_data(&ptr2, 0, sizeof(*sample));
+ if (!sample)
+ goto done;
+
+ sample->pid = 23;
+
+ bpf_ringbuf_submit_dynptr(&ptr2, 0);
+
+ /* this should fail */
+ sample->pid = 23;
+
+ bpf_ringbuf_submit_dynptr(&ptr1, 0);
+
+ return 0;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr2, 0);
+ bpf_ringbuf_discard_dynptr(&ptr1, 0);
+ return 0;
+}
+
/* A data slice must be first checked for NULL */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int data_slice_missing_null_check1(void *ctx)
{
struct bpf_dynptr ptr;
@@ -293,7 +329,7 @@ int data_slice_missing_null_check1(void *ctx)
}
/* A data slice can't be dereferenced if it wasn't checked for null */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int data_slice_missing_null_check2(void *ctx)
{
struct bpf_dynptr ptr;
@@ -315,7 +351,7 @@ done:
/* Can't pass in a dynptr as an arg to a helper function that doesn't take in a
* dynptr argument
*/
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int invalid_helper1(void *ctx)
{
struct bpf_dynptr ptr;
@@ -329,7 +365,7 @@ int invalid_helper1(void *ctx)
}
/* A dynptr can't be passed into a helper function at a non-zero offset */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int invalid_helper2(void *ctx)
{
struct bpf_dynptr ptr;
@@ -344,7 +380,7 @@ int invalid_helper2(void *ctx)
}
/* A bpf_dynptr is invalidated if it's been written into */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int invalid_write1(void *ctx)
{
struct bpf_dynptr ptr;
@@ -365,7 +401,7 @@ int invalid_write1(void *ctx)
* A bpf_dynptr can't be used as a dynptr if it has been written into at a fixed
* offset
*/
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int invalid_write2(void *ctx)
{
struct bpf_dynptr ptr;
@@ -388,7 +424,7 @@ int invalid_write2(void *ctx)
* A bpf_dynptr can't be used as a dynptr if it has been written into at a
* non-const offset
*/
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int invalid_write3(void *ctx)
{
struct bpf_dynptr ptr;
@@ -419,7 +455,7 @@ static int invalid_write4_callback(__u32 index, void *data)
/* If the dynptr is written into in a callback function, it should
* be invalidated as a dynptr
*/
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int invalid_write4(void *ctx)
{
struct bpf_dynptr ptr;
@@ -436,7 +472,7 @@ int invalid_write4(void *ctx)
/* A globally-defined bpf_dynptr can't be used (it must reside as a stack frame) */
struct bpf_dynptr global_dynptr;
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int global(void *ctx)
{
/* this should fail */
@@ -448,7 +484,7 @@ int global(void *ctx)
}
/* A direct read should fail */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int invalid_read1(void *ctx)
{
struct bpf_dynptr ptr;
@@ -464,7 +500,7 @@ int invalid_read1(void *ctx)
}
/* A direct read at an offset should fail */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int invalid_read2(void *ctx)
{
struct bpf_dynptr ptr;
@@ -479,7 +515,7 @@ int invalid_read2(void *ctx)
}
/* A direct read at an offset into the lower stack slot should fail */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int invalid_read3(void *ctx)
{
struct bpf_dynptr ptr1, ptr2;
@@ -505,7 +541,7 @@ static int invalid_read4_callback(__u32 index, void *data)
}
/* A direct read within a callback function should fail */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int invalid_read4(void *ctx)
{
struct bpf_dynptr ptr;
@@ -520,7 +556,7 @@ int invalid_read4(void *ctx)
}
/* Initializing a dynptr on an offset should fail */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int invalid_offset(void *ctx)
{
struct bpf_dynptr ptr;
@@ -534,7 +570,7 @@ int invalid_offset(void *ctx)
}
/* Can't release a dynptr twice */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int release_twice(void *ctx)
{
struct bpf_dynptr ptr;
@@ -560,7 +596,7 @@ static int release_twice_callback_fn(__u32 index, void *data)
/* Test that releasing a dynptr twice, where one of the releases happens
* within a callback function, fails
*/
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int release_twice_callback(void *ctx)
{
struct bpf_dynptr ptr;
@@ -575,7 +611,7 @@ int release_twice_callback(void *ctx)
}
/* Reject unsupported local mem types for dynptr_from_mem API */
-SEC("?raw_tp/sys_nanosleep")
+SEC("?raw_tp")
int dynptr_from_mem_invalid_api(void *ctx)
{
struct bpf_dynptr ptr;
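Each program above is rejected by the verifier for the specific reason listed in prog_tests/dynptr.c. For contrast, a sketch of the valid ringbuf dynptr pattern those checks enforce (assumes a 5.19+ kernel; the map and struct names here are hypothetical): every reserve is paired with exactly one submit or discard, and the data slice is NULL-checked and only used before release.
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf_ok SEC(".maps");
struct sample_ok {
	int pid;
	int value;
};
SEC("?raw_tp")
int ringbuf_dynptr_ok(void *ctx)
{
	struct bpf_dynptr ptr;
	struct sample_ok *sample;
	bpf_ringbuf_reserve_dynptr(&ringbuf_ok, sizeof(*sample), 0, &ptr);
	sample = bpf_dynptr_data(&ptr, 0, sizeof(*sample));
	if (!sample) {
		/* reserve failed or slice unavailable: still must release */
		bpf_ringbuf_discard_dynptr(&ptr, 0);
		return 0;
	}
	sample->pid = bpf_get_current_pid_tgid() >> 32;
	sample->value = 1;
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}
char _license[] SEC("license") = "GPL";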
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c
new file mode 100644
index 000000000000..767472bc5a97
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+extern void bpf_kfunc_call_test_destructive(void) __ksym;
+
+SEC("tc")
+int kfunc_destructive_test(void)
+{
+ bpf_kfunc_call_test_destructive();
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/lsm.c b/tools/testing/selftests/bpf/progs/lsm.c
index 33694ef8acfa..d8d8af623bc2 100644
--- a/tools/testing/selftests/bpf/progs/lsm.c
+++ b/tools/testing/selftests/bpf/progs/lsm.c
@@ -4,6 +4,7 @@
* Copyright 2020 Google LLC.
*/
+#include "bpf_misc.h"
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -160,7 +161,7 @@ int BPF_PROG(test_task_free, struct task_struct *task)
int copy_test = 0;
-SEC("fentry.s/__x64_sys_setdomainname")
+SEC("fentry.s/" SYS_PREFIX "sys_setdomainname")
int BPF_PROG(test_sys_setdomainname, struct pt_regs *regs)
{
void *ptr = (void *)PT_REGS_PARM1(regs);
diff --git a/tools/testing/selftests/bpf/progs/test_autoattach.c b/tools/testing/selftests/bpf/progs/test_autoattach.c
new file mode 100644
index 000000000000..11a44493ebce
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_autoattach.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Google */
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+
+bool prog1_called = false;
+bool prog2_called = false;
+
+SEC("raw_tp/sys_enter")
+int prog1(const void *ctx)
+{
+ prog1_called = true;
+ return 0;
+}
+
+SEC("raw_tp/sys_exit")
+int prog2(const void *ctx)
+{
+ prog2_called = true;
+ return 0;
+}
+
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_cookie.c b/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
index 22d0ac8709b4..5a3a80f751c4 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
@@ -28,14 +28,14 @@ static void update(void *ctx, __u64 *res)
*res |= bpf_get_attach_cookie(ctx);
}
-SEC("kprobe/sys_nanosleep")
+SEC("kprobe")
int handle_kprobe(struct pt_regs *ctx)
{
update(ctx, &kprobe_res);
return 0;
}
-SEC("kretprobe/sys_nanosleep")
+SEC("kretprobe")
int handle_kretprobe(struct pt_regs *ctx)
{
update(ctx, &kretprobe_res);
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
index 196cd8dfe42a..2722441850cc 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_nf.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
@@ -23,6 +23,12 @@ int test_insert_entry = -EAFNOSUPPORT;
int test_succ_lookup = -ENOENT;
u32 test_delta_timeout = 0;
u32 test_status = 0;
+__be32 saddr = 0;
+__be16 sport = 0;
+__be32 daddr = 0;
+__be16 dport = 0;
+int test_exist_lookup = -ENOENT;
+u32 test_exist_lookup_mark = 0;
struct nf_conn;
@@ -160,6 +166,21 @@ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
}
test_alloc_entry = 0;
}
+
+ bpf_tuple.ipv4.saddr = saddr;
+ bpf_tuple.ipv4.daddr = daddr;
+ bpf_tuple.ipv4.sport = sport;
+ bpf_tuple.ipv4.dport = dport;
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ if (ct) {
+ test_exist_lookup = 0;
+ if (ct->mark == 42)
+ test_exist_lookup_mark = 43;
+ bpf_ct_release(ct);
+ } else {
+ test_exist_lookup = opts_def.error;
+ }
}
SEC("xdp")
diff --git a/tools/testing/selftests/bpf/progs/test_helper_restricted.c b/tools/testing/selftests/bpf/progs/test_helper_restricted.c
index 20ef9d433b97..5715c569ec03 100644
--- a/tools/testing/selftests/bpf/progs/test_helper_restricted.c
+++ b/tools/testing/selftests/bpf/progs/test_helper_restricted.c
@@ -72,7 +72,7 @@ int tp_timer(void *ctx)
return 0;
}
-SEC("?kprobe/sys_nanosleep")
+SEC("?kprobe")
int kprobe_timer(void *ctx)
{
timer_work();
@@ -104,7 +104,7 @@ int tp_spin_lock(void *ctx)
return 0;
}
-SEC("?kprobe/sys_nanosleep")
+SEC("?kprobe")
int kprobe_spin_lock(void *ctx)
{
spin_lock_work();
diff --git a/tools/testing/selftests/bpf/progs/test_time_tai.c b/tools/testing/selftests/bpf/progs/test_time_tai.c
new file mode 100644
index 000000000000..7ea0863f3ddb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_time_tai.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Linutronix GmbH */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("tc")
+int time_tai(struct __sk_buff *skb)
+{
+ __u64 ts1, ts2;
+
+ /* Get TAI timestamps */
+ ts1 = bpf_ktime_get_tai_ns();
+ ts2 = bpf_ktime_get_tai_ns();
+
+ /* Save TAI timestamps (Note: skb->hwtstamp is read-only) */
+ skb->tstamp = ts1;
+ skb->cb[0] = ts2 & 0xffffffff;
+ skb->cb[1] = ts2 >> 32;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh
index b86ae4a2e5c5..a29aa05ebb3e 100755
--- a/tools/testing/selftests/bpf/vmtest.sh
+++ b/tools/testing/selftests/bpf/vmtest.sh
@@ -307,6 +307,20 @@ update_kconfig()
fi
}
+catch()
+{
+ local exit_code=$1
+ local exit_status_file="${OUTPUT_DIR}/${EXIT_STATUS_FILE}"
+ # This is just a cleanup and the directory may
+ # have already been unmounted. So, don't let this
+ # clobber the error code we intend to return.
+ unmount_image || true
+ if [[ -f "${exit_status_file}" ]]; then
+ exit_code="$(cat ${exit_status_file})"
+ fi
+ exit ${exit_code}
+}
+
main()
{
local script_dir="$(cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P)"
@@ -319,7 +333,7 @@ main()
local exit_command="poweroff -f"
local debug_shell="no"
- while getopts 'hskid:j:' opt; do
+ while getopts ':hskid:j:' opt; do
case ${opt} in
i)
update_image="yes"
@@ -353,6 +367,8 @@ main()
done
shift $((OPTIND -1))
+ trap 'catch "$?"' EXIT
+
if [[ $# -eq 0 && "${debug_shell}" == "no" ]]; then
echo "No command specified, will run ${DEFAULT_COMMAND} in the vm"
else
@@ -409,20 +425,4 @@ main()
fi
}
-catch()
-{
- local exit_code=$1
- local exit_status_file="${OUTPUT_DIR}/${EXIT_STATUS_FILE}"
- # This is just a cleanup and the directory may
- # have already been unmounted. So, don't let this
- # clobber the error code we intend to return.
- unmount_image || true
- if [[ -f "${exit_status_file}" ]]; then
- exit_code="$(cat ${exit_status_file})"
- fi
- exit ${exit_code}
-}
-
-trap 'catch "$?"' EXIT
-
main "$@"
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 74d56d971baf..14b4737b223c 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -244,6 +244,11 @@ static void gen_udp_hdr(u32 payload, void *pkt, struct ifobject *ifobject,
memset32_htonl(pkt + PKT_HDR_SIZE, payload, UDP_PKT_DATA_SIZE);
}
+static bool is_umem_valid(struct ifobject *ifobj)
+{
+ return !!ifobj->umem->umem;
+}
+
static void gen_udp_csum(struct udphdr *udp_hdr, struct iphdr *ip_hdr)
{
udp_hdr->check = 0;
@@ -817,12 +822,13 @@ static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
return TEST_PASS;
}
-static int receive_pkts(struct ifobject *ifobj, struct pollfd *fds)
+static int receive_pkts(struct test_spec *test, struct pollfd *fds)
{
- struct timeval tv_end, tv_now, tv_timeout = {RECV_TMOUT, 0};
+ struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
+ struct pkt_stream *pkt_stream = test->ifobj_rx->pkt_stream;
u32 idx_rx = 0, idx_fq = 0, rcvd, i, pkts_sent = 0;
- struct pkt_stream *pkt_stream = ifobj->pkt_stream;
- struct xsk_socket_info *xsk = ifobj->xsk;
+ struct xsk_socket_info *xsk = test->ifobj_rx->xsk;
+ struct ifobject *ifobj = test->ifobj_rx;
struct xsk_umem_info *umem = xsk->umem;
struct pkt *pkt;
int ret;
@@ -843,17 +849,28 @@ static int receive_pkts(struct ifobject *ifobj, struct pollfd *fds)
}
kick_rx(xsk);
+ if (ifobj->use_poll) {
+ ret = poll(fds, 1, POLL_TMOUT);
+ if (ret < 0)
+ exit_with_error(-ret);
+
+ if (!ret) {
+ if (!is_umem_valid(test->ifobj_tx))
+ return TEST_PASS;
+
+ ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
+ ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
+ return TEST_FAILURE;
+ }
- rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
- if (!rcvd) {
- if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
- ret = poll(fds, 1, POLL_TMOUT);
- if (ret < 0)
- exit_with_error(-ret);
- }
- continue;
+
+ if (!(fds->revents & POLLIN))
+ continue;
}
+ rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
+ if (!rcvd)
+ continue;
+
if (ifobj->use_fill_ring) {
ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
while (ret != rcvd) {
@@ -900,13 +917,35 @@ static int receive_pkts(struct ifobject *ifobj, struct pollfd *fds)
return TEST_PASS;
}
-static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb)
+static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fds,
+ bool timeout)
{
struct xsk_socket_info *xsk = ifobject->xsk;
- u32 i, idx, valid_pkts = 0;
+ bool use_poll = ifobject->use_poll;
+ u32 i, idx = 0, ret, valid_pkts = 0;
+
- while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE)
+ while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
+ if (use_poll) {
+ ret = poll(fds, 1, POLL_TMOUT);
+ if (timeout) {
+ if (ret < 0) {
+ ksft_print_msg("ERROR: [%s] Poll error %d\n",
+ __func__, ret);
+ return TEST_FAILURE;
+ }
+ if (ret == 0)
+ return TEST_PASS;
+ break;
+ }
+ if (ret <= 0) {
+ ksft_print_msg("ERROR: [%s] Poll error %d\n",
+ __func__, ret);
+ return TEST_FAILURE;
+ }
+ }
complete_pkts(xsk, BATCH_SIZE);
+ }
for (i = 0; i < BATCH_SIZE; i++) {
struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
@@ -933,11 +972,27 @@ static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb)
xsk_ring_prod__submit(&xsk->tx, i);
xsk->outstanding_tx += valid_pkts;
- if (complete_pkts(xsk, i))
- return TEST_FAILURE;
- usleep(10);
- return TEST_PASS;
+ if (use_poll) {
+ ret = poll(fds, 1, POLL_TMOUT);
+ if (ret <= 0) {
+ if (ret == 0 && timeout)
+ return TEST_PASS;
+
+ ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret);
+ return TEST_FAILURE;
+ }
+ }
+
+ if (!timeout) {
+ if (complete_pkts(xsk, i))
+ return TEST_FAILURE;
+
+ usleep(10);
+ return TEST_PASS;
+ }
+
+ return TEST_CONTINUE;
}
static void wait_for_tx_completion(struct xsk_socket_info *xsk)
@@ -948,29 +1003,19 @@ static void wait_for_tx_completion(struct xsk_socket_info *xsk)
static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
{
+ bool timeout = !is_umem_valid(test->ifobj_rx);
struct pollfd fds = { };
- u32 pkt_cnt = 0;
+ u32 pkt_cnt = 0, ret;
fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
fds.events = POLLOUT;
while (pkt_cnt < ifobject->pkt_stream->nb_pkts) {
- int err;
-
- if (ifobject->use_poll) {
- int ret;
-
- ret = poll(&fds, 1, POLL_TMOUT);
- if (ret <= 0)
- continue;
-
- if (!(fds.revents & POLLOUT))
- continue;
- }
-
- err = __send_pkts(ifobject, &pkt_cnt);
- if (err || test->fail)
+ ret = __send_pkts(ifobject, &pkt_cnt, &fds, timeout);
+ if ((ret || test->fail) && !timeout)
return TEST_FAILURE;
+ else if (ret == TEST_PASS && timeout)
+ return ret;
}
wait_for_tx_completion(ifobject->xsk);
@@ -1235,7 +1280,7 @@ static void *worker_testapp_validate_rx(void *arg)
pthread_barrier_wait(&barr);
- err = receive_pkts(ifobject, &fds);
+ err = receive_pkts(test, &fds);
if (!err && ifobject->validation_func)
err = ifobject->validation_func(ifobject);
@@ -1251,6 +1296,33 @@ static void *worker_testapp_validate_rx(void *arg)
pthread_exit(NULL);
}
+static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj,
+ enum test_type type)
+{
+ pthread_t t0;
+
+ if (pthread_barrier_init(&barr, NULL, 2))
+ exit_with_error(errno);
+
+ test->current_step++;
+ if (type == TEST_TYPE_POLL_RXQ_TMOUT)
+ pkt_stream_reset(ifobj->pkt_stream);
+ pkts_in_flight = 0;
+
+ /* Spawn thread */
+ pthread_create(&t0, NULL, ifobj->func_ptr, test);
+
+ if (type != TEST_TYPE_POLL_TXQ_TMOUT)
+ pthread_barrier_wait(&barr);
+
+ if (pthread_barrier_destroy(&barr))
+ exit_with_error(errno);
+
+ pthread_join(t0, NULL);
+
+ return !!test->fail;
+}
+
static int testapp_validate_traffic(struct test_spec *test)
{
struct ifobject *ifobj_tx = test->ifobj_tx;
@@ -1548,12 +1620,30 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_
pkt_stream_restore_default(test);
break;
- case TEST_TYPE_POLL:
- test->ifobj_tx->use_poll = true;
+ case TEST_TYPE_RX_POLL:
test->ifobj_rx->use_poll = true;
- test_spec_set_name(test, "POLL");
+ test_spec_set_name(test, "POLL_RX");
testapp_validate_traffic(test);
break;
+ case TEST_TYPE_TX_POLL:
+ test->ifobj_tx->use_poll = true;
+ test_spec_set_name(test, "POLL_TX");
+ testapp_validate_traffic(test);
+ break;
+ case TEST_TYPE_POLL_TXQ_TMOUT:
+ test_spec_set_name(test, "POLL_TXQ_FULL");
+ test->ifobj_tx->use_poll = true;
+ /* create an invalid frame by setting both umem frame_size and pkt length to 2048 */
+ test->ifobj_tx->umem->frame_size = 2048;
+ pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048);
+ testapp_validate_traffic_single_thread(test, test->ifobj_tx, type);
+ pkt_stream_restore_default(test);
+ break;
+ case TEST_TYPE_POLL_RXQ_TMOUT:
+ test_spec_set_name(test, "POLL_RXQ_EMPTY");
+ test->ifobj_rx->use_poll = true;
+ testapp_validate_traffic_single_thread(test, test->ifobj_rx, type);
+ break;
case TEST_TYPE_ALIGNED_INV_DESC:
test_spec_set_name(test, "ALIGNED_INV_DESC");
testapp_invalid_desc(test);
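The new POLL_TXQ_FULL and POLL_RXQ_EMPTY cases hinge on distinguishing the three poll() outcomes instead of treating every non-positive return as a retry, as the old send path did. A plain POSIX sketch of that convention (the wait_readable() helper is hypothetical):
#include <poll.h>
#include <errno.h>
/* hypothetical helper: 1 if fd is readable, 0 on timeout, -errno on error */
static int wait_readable(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int ret = poll(&pfd, 1, timeout_ms);
	if (ret < 0)
		return -errno;              /* hard error */
	if (ret == 0)
		return 0;                   /* timeout expired */
	return (pfd.revents & POLLIN) ? 1 : 0; /* positive return: check revents */
}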
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index 3d17053f98e5..ee97576757a9 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -27,6 +27,7 @@
#define TEST_PASS 0
#define TEST_FAILURE -1
+#define TEST_CONTINUE 1
#define MAX_INTERFACES 2
#define MAX_INTERFACE_NAME_CHARS 7
#define MAX_INTERFACES_NAMESPACE_CHARS 10
@@ -48,7 +49,7 @@
#define SOCK_RECONF_CTR 10
#define BATCH_SIZE 64
#define POLL_TMOUT 1000
-#define RECV_TMOUT 3
+#define THREAD_TMOUT 3
#define DEFAULT_PKT_CNT (4 * 1024)
#define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4)
#define UMEM_SIZE (DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE)
@@ -68,7 +69,10 @@ enum test_type {
TEST_TYPE_RUN_TO_COMPLETION,
TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME,
TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT,
- TEST_TYPE_POLL,
+ TEST_TYPE_RX_POLL,
+ TEST_TYPE_TX_POLL,
+ TEST_TYPE_POLL_RXQ_TMOUT,
+ TEST_TYPE_POLL_TXQ_TMOUT,
TEST_TYPE_UNALIGNED,
TEST_TYPE_ALIGNED_INV_DESC,
TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME,