author		Yafang Shao <laoar.shao@gmail.com>	2023-03-05 12:46:13 +0000
committer	Alexei Starovoitov <ast@kernel.org>	2023-03-07 09:33:43 -0800
commit		b4fd0d672bca001632d7291b5b162b08e065b815 (patch)
tree		aae302ce6e01d5c2d8caf9847f7da0b5ebd62ea6
parent		bpf, net: sock_map memory usage (diff)
download	wireguard-linux-b4fd0d672bca001632d7291b5b162b08e065b815.tar.xz
		wireguard-linux-b4fd0d672bca001632d7291b5b162b08e065b815.zip
bpf, net: xskmap memory usage
A new helper is introduced to calculate xskmap memory usage.

The xskmap memory usage can change dynamically when we add or remove an
xsk_map_node, hence we need to track the count of xsk_map_node to get
its memory usage.

The result is as follows,
- before
10: xskmap  name count_map  flags 0x0
	key 4B  value 4B  max_entries 65536  memlock 524288B

- after
10: xskmap  name count_map  flags 0x0        <<< no elements case
	key 4B  value 4B  max_entries 65536  memlock 524608B

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Link: https://lore.kernel.org/r/20230305124615.12358-17-laoar.shao@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
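To observe the reported value from userspace, here is a minimal sketch (not part of this patch) that creates an empty XSKMAP like the count_map above and prints the memlock figure the kernel exposes through /proc/<pid>/fdinfo. It assumes libbpf >= 0.7 for bpf_map_create(), sufficient privileges (CAP_BPF or root), and is built with -lbpf:

/* Hypothetical userspace check, not from the patch: create an XSKMAP
 * and read back the kernel-reported "memlock" value from fdinfo.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>

int main(void)
{
	char path[64], line[256];
	FILE *f;
	int fd;

	/* BPF_MAP_TYPE_XSKMAP uses 4-byte keys and 4-byte values */
	fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, "count_map", 4, 4, 65536, NULL);
	if (fd < 0) {
		perror("bpf_map_create");
		return 1;
	}

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
	f = fopen(path, "r");
	if (f) {
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "memlock:", 8))
				fputs(line, stdout);	/* e.g. "memlock:  524608" */
		fclose(f);
	}
	close(fd);
	return 0;
}

With this patch applied, the empty map already accounts for the struct xsk_map header, and the reported value grows by one struct xsk_map_node per inserted socket.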
-rw-r--r--	include/net/xdp_sock.h	1
-rw-r--r--	net/xdp/xskmap.c	13
2 files changed, 14 insertions, 0 deletions
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 3057e1a4a11c..e96a1151ec75 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -38,6 +38,7 @@ struct xdp_umem {
 struct xsk_map {
 	struct bpf_map map;
 	spinlock_t lock; /* Synchronize map updates */
+	atomic_t count;
 	struct xdp_sock __rcu *xsk_map[];
 };
 
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
index 771d0fa90ef5..0c38d7175922 100644
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -24,6 +24,7 @@ static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
 		return ERR_PTR(-ENOMEM);
 
 	bpf_map_inc(&map->map);
+	atomic_inc(&map->count);
 
 	node->map = map;
 	node->map_entry = map_entry;
@@ -32,8 +33,11 @@ static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
 
 static void xsk_map_node_free(struct xsk_map_node *node)
 {
+	struct xsk_map *map = node->map;
+
 	bpf_map_put(&node->map->map);
 	kfree(node);
+	atomic_dec(&map->count);
 }
 
 static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
@@ -85,6 +89,14 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 	return &m->map;
 }
 
+static u64 xsk_map_mem_usage(const struct bpf_map *map)
+{
+	struct xsk_map *m = container_of(map, struct xsk_map, map);
+
+	return struct_size(m, xsk_map, map->max_entries) +
+	       (u64)atomic_read(&m->count) * sizeof(struct xsk_map_node);
+}
+
 static void xsk_map_free(struct bpf_map *map)
 {
 	struct xsk_map *m = container_of(map, struct xsk_map, map);
@@ -267,6 +279,7 @@ const struct bpf_map_ops xsk_map_ops = {
 	.map_update_elem = xsk_map_update_elem,
 	.map_delete_elem = xsk_map_delete_elem,
 	.map_check_btf = map_check_no_btf,
+	.map_mem_usage = xsk_map_mem_usage,
 	.map_btf_id = &xsk_map_btf_ids[0],
 	.map_redirect = xsk_map_redirect,
 };
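A note on the arithmetic in xsk_map_mem_usage(): struct_size(m, xsk_map, map->max_entries) expands to sizeof(*m) plus max_entries entries of the xsk_map[] pointer array, and each inserted socket then adds sizeof(struct xsk_map_node). A rough standalone check is sketched below; the struct sizes are assumptions for a 64-bit build (chosen so the empty-map case reproduces the 524608B figure quoted in the commit message), not values taken from the patch:

/* Illustration only: mirrors the formula in xsk_map_mem_usage() with
 * assumed 64-bit struct sizes, not values read from the kernel.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_entries = 65536;	/* from the bpftool output above */
	uint64_t ptr_size    = 8;	/* sizeof(struct xdp_sock __rcu *) on 64-bit */
	uint64_t map_header  = 320;	/* assumed sizeof(struct xsk_map) */
	uint64_t node_size   = 32;	/* assumed sizeof(struct xsk_map_node) */
	uint64_t count       = 0;	/* number of xsk_map_node elements */

	/* struct_size(m, xsk_map, max_entries)
	 *   == sizeof(*m) + max_entries * sizeof(m->xsk_map[0]) */
	uint64_t usage = map_header + max_entries * ptr_size + count * node_size;

	printf("memlock %llu B\n", (unsigned long long)usage);	/* 524608 with count == 0 */
	return 0;
}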