author:    Roman Gushchin <guro@fb.com>    2018-09-28 14:46:00 +0000
committer: Daniel Borkmann <daniel@iogearbox.net>    2018-10-01 16:18:33 +0200
commit:    371e4fcc9d96ab1c8d72d59ca4ee3537402d1584 (patch)
tree:      1b48dce888a0bcd3b8b4a747dc661b6e079e3f26 /tools/testing/selftests/bpf/test_netcnt.c
parent:    samples/bpf: extend test_cgrp2_attach2 test to use per-cpu cgroup storage (diff)
selftests/bpf: cgroup local storage-based network counters
This commit adds a bpf kselftest which demonstrates how per-cpu and shared
cgroup local storage can be used for efficient lookup-free network accounting.

Cgroup local storage provides a generic memory area with very efficient
lookup-free access. To avoid expensive atomic operations for each packet,
per-cpu cgroup local storage is used. Each packet is initially charged to a
per-cpu counter, and only when the counter reaches a certain value (32 in this
case) is the charge moved into the global atomic counter. This amortizes the
cost of the atomic operations while keeping reasonable accuracy.

The test also implements naive network traffic throttling, mostly to
demonstrate the possibility of bpf cgroup-based network bandwidth control.

Expected output:
  ./test_netcnt
  test_netcnt:PASS

Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
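A minimal sketch of the charging scheme described above, written as the fast
path of the egress program. The real program lives in netcnt_prog.o (built
from netcnt_prog.c), which is not part of this diff: the map names "netcnt"
and "percpu_netcnt" come from the test's map lookups below and
MAX_PERCPU_PACKETS from netcnt_common.h, but the function name and the exact
flush logic are illustrative assumptions.

    SEC("cgroup/skb")
    int count_egress_packets(struct __sk_buff *skb)
    {
    	struct percpu_net_cnt *percpu_cnt;
    	struct net_cnt *cnt;

    	/* Lookup-free access to this cgroup's local storage */
    	cnt = bpf_get_local_storage(&netcnt, 0);
    	percpu_cnt = bpf_get_local_storage(&percpu_netcnt, 0);

    	/* Cheap per-cpu charge on every packet */
    	percpu_cnt->packets++;
    	percpu_cnt->bytes += skb->len;

    	/* Move the charge to the shared counters only every 32
    	 * packets (MAX_PERCPU_PACKETS), amortizing the cost of
    	 * the atomic additions.
    	 */
    	if (percpu_cnt->packets >= MAX_PERCPU_PACKETS) {
    		__sync_fetch_and_add(&cnt->packets, percpu_cnt->packets);
    		percpu_cnt->packets = 0;
    		__sync_fetch_and_add(&cnt->bytes, percpu_cnt->bytes);
    		percpu_cnt->bytes = 0;
    	}

    	return 1; /* accept the packet */
    }

The shared counter would live in a BPF_MAP_TYPE_CGROUP_STORAGE map and the
per-cpu one in a BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE map, the map type this
patch series introduces.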
Diffstat (limited to 'tools/testing/selftests/bpf/test_netcnt.c')
-rw-r--r--    tools/testing/selftests/bpf/test_netcnt.c    158
1 file changed, 158 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/test_netcnt.c b/tools/testing/selftests/bpf/test_netcnt.c
new file mode 100644
index 000000000000..7887df693399
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_netcnt.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/sysinfo.h>
+#include <sys/time.h>
+
+#include <linux/bpf.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "cgroup_helpers.h"
+#include "bpf_rlimit.h"
+#include "netcnt_common.h"
+
+#define BPF_PROG "./netcnt_prog.o"
+#define TEST_CGROUP "/test-network-counters/"
+
+static int bpf_find_map(const char *test, struct bpf_object *obj,
+ const char *name)
+{
+ struct bpf_map *map;
+
+ map = bpf_object__find_map_by_name(obj, name);
+ if (!map) {
+ printf("%s:FAIL:map '%s' not found\n", test, name);
+ return -1;
+ }
+ return bpf_map__fd(map);
+}
+
+int main(int argc, char **argv)
+{
+ struct percpu_net_cnt *percpu_netcnt;
+ struct bpf_cgroup_storage_key key;
+ int map_fd, percpu_map_fd;
+ int error = EXIT_FAILURE;
+ struct net_cnt netcnt;
+ struct bpf_object *obj;
+ int prog_fd, cgroup_fd;
+ unsigned long packets;
+ unsigned long bytes;
+ int cpu, nproc;
+ __u32 prog_cnt;
+
+ nproc = get_nprocs_conf();
+ percpu_netcnt = malloc(sizeof(*percpu_netcnt) * nproc);
+ if (!percpu_netcnt) {
+ printf("Not enough memory for per-cpu area (%d cpus)\n", nproc);
+ goto err;
+ }
+
+ if (bpf_prog_load(BPF_PROG, BPF_PROG_TYPE_CGROUP_SKB,
+ &obj, &prog_fd)) {
+ printf("Failed to load bpf program\n");
+ free(percpu_netcnt);
+ goto out;
+ }
+
+ if (setup_cgroup_environment()) {
+ printf("Failed to load bpf program\n");
+ goto err;
+ }
+
+ /* Create a cgroup, get fd, and join it */
+ cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
+ if (!cgroup_fd) {
+ printf("Failed to create test cgroup\n");
+ goto err;
+ }
+
+ if (join_cgroup(TEST_CGROUP)) {
+ printf("Failed to join cgroup\n");
+ goto err;
+ }
+
+ /* Attach bpf program */
+ if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) {
+ printf("Failed to attach bpf program");
+ goto err;
+ }
+
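+ /* Generate traffic: 10000 ICMPv6 echo requests over loopback, all
+ * of which pass through the attached egress program.
+ */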
+ assert(system("ping localhost -6 -c 10000 -f -q > /dev/null") == 0);
+
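+ /* Query the number of attached programs; only the success of the
+ * query itself is checked here, prog_cnt is not examined.
+ */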
+ if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL,
+ &prog_cnt)) {
+ printf("Failed to query attached programs");
+ goto err;
+ }
+
+ map_fd = bpf_find_map(__func__, obj, "netcnt");
+ if (map_fd < 0) {
+ printf("Failed to find bpf map with net counters");
+ goto err;
+ }
+
+ percpu_map_fd = bpf_find_map(__func__, obj, "percpu_netcnt");
+ if (percpu_map_fd < 0) {
+ printf("Failed to find bpf map with percpu net counters");
+ goto err;
+ }
+
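+ /* Cgroup storage maps are keyed by struct bpf_cgroup_storage_key
+ * (cgroup inode id plus attach type); fetch the first and, since
+ * only one cgroup is attached here, only key.
+ */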
+ if (bpf_map_get_next_key(map_fd, NULL, &key)) {
+ printf("Failed to get key in cgroup storage\n");
+ goto err;
+ }
+
+ if (bpf_map_lookup_elem(map_fd, &key, &netcnt)) {
+ printf("Failed to lookup cgroup storage\n");
+ goto err;
+ }
+
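+ /* A single lookup on a per-cpu map fills in one value per cpu */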
+ if (bpf_map_lookup_elem(percpu_map_fd, &key, &percpu_netcnt[0])) {
+ printf("Failed to lookup percpu cgroup storage\n");
+ goto err;
+ }
+
+ /* Some packets may still be sitting in the per-cpu cache, but not
+ * more than MAX_PERCPU_PACKETS per cpu.
+ */
+ packets = netcnt.packets;
+ bytes = netcnt.bytes;
+ for (cpu = 0; cpu < nproc; cpu++) {
+ if (percpu_netcnt[cpu].packets > MAX_PERCPU_PACKETS) {
+ printf("Unexpected percpu value: %llu\n",
+ percpu_netcnt[cpu].packets);
+ goto err;
+ }
+
+ packets += percpu_netcnt[cpu].packets;
+ bytes += percpu_netcnt[cpu].bytes;
+ }
+
+ /* No packets should be lost */
+ if (packets != 10000) {
+ printf("Unexpected packet count: %lu\n", packets);
+ goto err;
+ }
+
+ /* Check that the bytes counter matches the number of packets
+ * multiplied by the size of an IPv6 ICMP echo packet: 104 bytes,
+ * i.e. a 40-byte IPv6 header, an 8-byte ICMPv6 header, and the
+ * default 56 bytes of ping payload.
+ */
+ if (bytes != packets * 104) {
+ printf("Unexpected bytes count: %lu\n", bytes);
+ goto err;
+ }
+
+ error = 0;
+ printf("test_netcnt:PASS\n");
+
+err:
+ cleanup_cgroup_environment();
+ free(percpu_netcnt);
+
+out:
+ return error;
+}