path: root/tools/testing/selftests/bpf/bpf_arena_alloc.h
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#pragma once
#include "bpf_arena_common.h"

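/* Round x up to the next multiple of y; y must be a power of two
 * (e.g. round_up(5, 8) == 8).
 */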
#ifndef __round_mask
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#endif
#ifndef round_up
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#endif

#ifdef __BPF__
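/* Derive the possible-CPU count from the size of the kernel's cpumask
 * (one bit per CPU).
 */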
#define NR_CPUS (sizeof(struct cpumask) * 8)

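/* Per-CPU allocation cursor: the current arena page and the bump-down offset. */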
static void __arena * __arena page_frag_cur_page[NR_CPUS];
static int __arena page_frag_cur_offset[NR_CPUS];

/*
 * Simple page_frag allocator: objects are carved out of an arena page from
 * the top down.  The last 8 bytes of each page hold a count of live
 * objects, and bpf_free() hands the page back to the arena once that count
 * drops to zero.
 */
static inline void __arena* bpf_alloc(unsigned int size)
{
	__u64 __arena *obj_cnt;
	__u32 cpu = bpf_get_smp_processor_id();
	void __arena *page = page_frag_cur_page[cpu];
	int __arena *cur_offset = &page_frag_cur_offset[cpu];
	int offset;

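	/*
	 * The top 8 bytes of every page are reserved for the object
	 * counter, so a request close to a full page cannot be satisfied.
	 */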
	size = round_up(size, 8);
	if (size >= PAGE_SIZE - 8)
		return NULL;
	if (!page) {
refill:
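		/*
		 * Grab a fresh arena page; its last 8 bytes become the
		 * live-object counter.
		 */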
		page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
		if (!page)
			return NULL;
		cast_kern(page);
		page_frag_cur_page[cpu] = page;
		*cur_offset = PAGE_SIZE - 8;
		obj_cnt = page + PAGE_SIZE - 8;
		*obj_cnt = 0;
	} else {
		cast_kern(page);
		obj_cnt = page + PAGE_SIZE - 8;
	}

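	/*
	 * Bump downward; if the object no longer fits in the current page,
	 * retire it and carve from a fresh one.
	 */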
	offset = *cur_offset - size;
	if (offset < 0)
		goto refill;

	(*obj_cnt)++;
	*cur_offset = offset;
	return page + offset;
}

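/*
 * Objects never span pages, so masking the object address recovers the
 * owning page and its trailing object counter.
 */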
static inline void bpf_free(void __arena *addr)
{
	__u64 __arena *obj_cnt;

	addr = (void __arena *)(((long)addr) & ~(PAGE_SIZE - 1));
	obj_cnt = addr + PAGE_SIZE - 8;
	if (--(*obj_cnt) == 0)
		bpf_arena_free_pages(&arena, addr, 1);
}
#else
static inline void __arena* bpf_alloc(unsigned int size) { return NULL; }
static inline void bpf_free(void __arena *addr) {}
#endif
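
/*
 * Usage sketch -- not part of the header above.  A minimal arena program
 * modeled on the bpf_arena selftests (e.g. arena_list.c); the map sizing
 * and the names "struct elem" and "arena_alloc_prog" are illustrative
 * assumptions.  Note that bpf_alloc()/bpf_free() dereference a map named
 * "arena", so that map must be declared before this header is included.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 10);	/* number of arena pages */
} arena SEC(".maps");

#include "bpf_arena_alloc.h"

struct elem {
	__u64 value;
};

SEC("syscall")
int arena_alloc_prog(void *ctx)
{
	/* Carved out of this CPU's current arena page, rounded up to 8 bytes. */
	struct elem __arena *e = bpf_alloc(sizeof(*e));

	if (!e)
		return 1;
	e->value = 42;
	/* Decrements the owning page's object count; the page is freed at zero. */
	bpf_free(e);
	return 0;
}

char _license[] SEC("license") = "GPL";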