author	Jiong Wang <jiong.wang@netronome.com>	2019-02-11 12:01:20 +0000
committer	Alexei Starovoitov <ast@kernel.org>	2019-02-11 20:31:38 -0800
commit	bd4aed0ee73ca873bef3cb3ec746dd796f03df28 (patch)
tree	1dcd4de735f8ca3dbf672dc5d48d064ea1dea5dc /tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
parent	selftests: bpf: extend sub-register mode compilation to all bpf object files (diff)
selftests: bpf: centre kernel bpf objects under new subdir "progs"
At the moment, all kernel bpf objects are listed under BPF_OBJ_FILES. Listing them manually sometimes causes patch conflicts when people add new testcases simultaneously. It is better to centre all the related source files under a subdir "progs", then auto-generate the object file list.

Suggested-by: Alexei Starovoitov <ast@kernel.org>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
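For context, a minimal Make sketch of the auto-generation the message describes; the variable names here are illustrative, not necessarily the ones used in the real tools/testing/selftests/bpf/Makefile:

    # Hypothetical sketch: pick up every .c under progs/ and derive the
    # object file names from it, so new testcases need no Makefile edit.
    BPF_PROG_SRCS := $(notdir $(wildcard progs/*.c))
    BPF_OBJ_FILES := $(patsubst %.c,%.o,$(BPF_PROG_SRCS))

With a wildcard like this, adding a new source file under progs/ no longer touches a hand-maintained list, which is what avoids the patch conflicts mentioned above.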
Diffstat (limited to 'tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c')
-rw-r--r--	tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c	47
1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
new file mode 100644
index 000000000000..68cf9829f5a7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+
+#include <string.h>
+
+#include "bpf_helpers.h"
+
+#define NUM_CGROUP_LEVELS 4
+
+struct bpf_map_def SEC("maps") cgroup_ids = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u64),
+ .max_entries = NUM_CGROUP_LEVELS,
+};
+
+static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level)
+{
+ __u64 id;
+
+ /* [1] &level passed to external function that may change it, it's
+ * incompatible with loop unroll.
+ */
+ id = bpf_skb_ancestor_cgroup_id(skb, level);
+ bpf_map_update_elem(&cgroup_ids, &level, &id, 0);
+}
+
+SEC("cgroup_id_logger")
+int log_cgroup_id(struct __sk_buff *skb)
+{
+ /* Loop unroll can't be used here due to [1]. Unrolling manually.
+ * Number of calls should be in sync with NUM_CGROUP_LEVELS.
+ */
+ log_nth_level(skb, 0);
+ log_nth_level(skb, 1);
+ log_nth_level(skb, 2);
+ log_nth_level(skb, 3);
+
+ return TC_ACT_OK;
+}
+
+int _version SEC("version") = 1;
+
+char _license[] SEC("license") = "GPL";
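
For reference, a minimal user-space sketch of how the cgroup_ids map produced by this program could be loaded and read with libbpf. This is illustrative only and not part of the commit: the object file name, the error handling, and the assumption that the program has separately been attached as a tc classifier (so the map actually gets populated) are all assumptions, not the real test harness.

    /* Hypothetical libbpf loader sketch, not part of this commit. Assumes
     * the program above was compiled to test_skb_cgroup_id_kern.o and has
     * already been attached via tc so log_cgroup_id() runs on traffic.
     */
    #include <stdio.h>
    #include <linux/types.h>
    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    int main(void)
    {
    	struct bpf_object *obj;
    	int map_fd;
    	__u32 level;
    	__u64 id;

    	obj = bpf_object__open("test_skb_cgroup_id_kern.o");
    	if (libbpf_get_error(obj))
    		return 1;
    	if (bpf_object__load(obj))
    		return 1;

    	map_fd = bpf_object__find_map_fd_by_name(obj, "cgroup_ids");
    	if (map_fd < 0)
    		return 1;

    	/* One entry per ancestor level, in sync with NUM_CGROUP_LEVELS. */
    	for (level = 0; level < 4; level++)
    		if (!bpf_map_lookup_elem(map_fd, &level, &id))
    			printf("level %u: cgroup id %llu\n", level,
    			       (unsigned long long)id);

    	bpf_object__close(obj);
    	return 0;
    }

Reading the map from user space this way mirrors what the kernel side writes: one bpf_map_update_elem() per ancestor level, keyed by the level index.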