tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */

#include <sys/random.h>
#include <argp.h>
#include "bench.h"
#include "bpf_hashmap_lookup.skel.h"
#include "bpf_util.h"

/* BPF hashmap lookup benchmark */
static struct ctx {
	struct bpf_hashmap_lookup *skel;
} ctx;

/* BPF_MAX_LOOPS is defined only in kernel headers, so mirror it here */
#define BPF_MAX_LOOPS (1<<23)

#define MAX_KEY_SIZE 1024 /* the size of the key map */

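/* Benchmark parameters; the defaults below give a 1000-entry map
 * half-filled with 500 4-byte keys and 1M lookups per timing sample.
 */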
static struct {
	__u32 key_size;
	__u32 map_flags;
	__u32 max_entries;
	__u32 nr_entries;
	__u32 nr_loops;
} args = {
	.key_size = 4,
	.map_flags = 0,
	.max_entries = 1000,
	.nr_entries = 500,
	.nr_loops = 1000000,
};

enum {
	ARG_KEY_SIZE = 8001,
	ARG_MAP_FLAGS,
	ARG_MAX_ENTRIES,
	ARG_NR_ENTRIES,
	ARG_NR_LOOPS,
};

static const struct argp_option opts[] = {
	{ "key_size", ARG_KEY_SIZE, "KEY_SIZE", 0,
	  "The hashmap key size (max 1024)"},
	{ "map_flags", ARG_MAP_FLAGS, "MAP_FLAGS", 0,
	  "The hashmap flags passed to BPF_MAP_CREATE"},
	{ "max_entries", ARG_MAX_ENTRIES, "MAX_ENTRIES", 0,
	  "The hashmap max entries"},
	{ "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0,
	  "The number of entries to insert/lookup"},
	{ "nr_loops", ARG_NR_LOOPS, "NR_LOOPS", 0,
	  "The number of loops for the benchmark"},
	{},
};

static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	long ret;

	switch (key) {
	case ARG_KEY_SIZE:
		ret = strtol(arg, NULL, 10);
		if (ret < 1 || ret > MAX_KEY_SIZE) {
			fprintf(stderr, "invalid key_size");
			argp_usage(state);
		}
		args.key_size = ret;
		break;
	case ARG_MAP_FLAGS:
		ret = strtol(arg, NULL, 0);
		if (ret < 0 || ret > UINT_MAX) {
			fprintf(stderr, "invalid map_flags");
			argp_usage(state);
		}
		args.map_flags = ret;
		break;
	case ARG_MAX_ENTRIES:
		ret = strtol(arg, NULL, 10);
		if (ret < 1 || ret > UINT_MAX) {
			fprintf(stderr, "invalid max_entries");
			argp_usage(state);
		}
		args.max_entries = ret;
		break;
	case ARG_NR_ENTRIES:
		ret = strtol(arg, NULL, 10);
		if (ret < 1 || ret > UINT_MAX) {
			fprintf(stderr, "invalid nr_entries");
			argp_usage(state);
		}
		args.nr_entries = ret;
		break;
	case ARG_NR_LOOPS:
		ret = strtol(arg, NULL, 10);
		if (ret < 1 || ret > BPF_MAX_LOOPS) {
			fprintf(stderr, "invalid nr_loops: %ld (min=1 max=%u)\n",
				ret, BPF_MAX_LOOPS);
			argp_usage(state);
		}
		args.nr_loops = ret;
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}

	return 0;
}

const struct argp bench_hashmap_lookup_argp = {
	.options = opts,
	.parser = parse_arg,
};

static void validate(void)
{
	if (env.consumer_cnt != 0) {
		fprintf(stderr, "benchmark doesn't support consumer!\n");
		exit(1);
	}

	if (args.nr_entries > args.max_entries) {
		fprintf(stderr, "args.nr_entries is too big! (max %u, got %u)\n",
			args.max_entries, args.nr_entries);
		exit(1);
	}
}

static void *producer(void *input)
{
	while (true) {
		/* trigger the bpf program */
		syscall(__NR_getpgid);
	}
	return NULL;
}

static void measure(struct bench_res *res)
{
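	/* no periodic measurement; results are read from the skeleton's
	 * percpu_times in hashmap_report_final()
	 */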
}

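/* Write the entry index (1-based) into the first word of the key. The
 * value is stored in little-endian byte order on every architecture, so
 * the key bytes are identical regardless of host endianness.
 */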
static inline void patch_key(u32 i, u32 *key)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	*key = i + 1;
#else
	*key = __builtin_bswap32(i + 1);
#endif
	/* the rest of the key keeps the pseudo-random fill from setup() */
}

static void setup(void)
{
	struct bpf_link *link;
	int map_fd;
	int ret;
	int i;

	setup_libbpf();

	ctx.skel = bpf_hashmap_lookup__open();
	if (!ctx.skel) {
		fprintf(stderr, "failed to open skeleton\n");
		exit(1);
	}

	bpf_map__set_max_entries(ctx.skel->maps.hash_map_bench, args.max_entries);
	bpf_map__set_key_size(ctx.skel->maps.hash_map_bench, args.key_size);
	bpf_map__set_value_size(ctx.skel->maps.hash_map_bench, 8);
	bpf_map__set_map_flags(ctx.skel->maps.hash_map_bench, args.map_flags);

	ctx.skel->bss->nr_entries = args.nr_entries;
	ctx.skel->bss->nr_loops = args.nr_loops / args.nr_entries;

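	/* pre-fill the tail of the key with deterministic pseudo-random
	 * words; 2654435761 is the 32-bit multiplicative-hash prime close
	 * to 2^32/phi (Knuth)
	 */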
	if (args.key_size > 4) {
		for (i = 1; i < args.key_size/4; i++)
			ctx.skel->bss->key[i] = 2654435761 * i;
	}

	ret = bpf_hashmap_lookup__load(ctx.skel);
	if (ret) {
		bpf_hashmap_lookup__destroy(ctx.skel);
		fprintf(stderr, "failed to load map: %s", strerror(-ret));
		exit(1);
	}

	/* fill in the hash_map */
	map_fd = bpf_map__fd(ctx.skel->maps.hash_map_bench);
	for (u64 i = 0; i < args.nr_entries; i++) {
		patch_key(i, ctx.skel->bss->key);
		bpf_map_update_elem(map_fd, ctx.skel->bss->key, &i, BPF_ANY);
	}

	link = bpf_program__attach(ctx.skel->progs.benchmark);
	if (!link) {
		fprintf(stderr, "failed to attach program!\n");
		exit(1);
	}
}

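/* Convert one timing sample (nanoseconds spent on args.nr_loops lookups)
 * into millions of lookups per second.
 */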
static inline double events_from_time(u64 time)
{
	if (time)
		return args.nr_loops * 1000000000llu / time / 1000000.0L;

	return 0;
}

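/* Aggregate the (up to 32) timing samples collected for one CPU. Returns
 * the number of valid samples and fills in the mean throughput, its
 * sample standard deviation (with Bessel's correction), and the mean
 * sample duration in nanoseconds.
 */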
static int compute_events(u64 *times, double *events_mean, double *events_stddev, u64 *mean_time)
{
	int i, n = 0;

	*events_mean = 0;
	*events_stddev = 0;
	*mean_time = 0;

	for (i = 0; i < 32; i++) {
		if (!times[i])
			break;
		*mean_time += times[i];
		*events_mean += events_from_time(times[i]);
		n += 1;
	}
	if (!n)
		return 0;

	*mean_time /= n;
	*events_mean /= n;

	if (n > 1) {
		for (i = 0; i < n; i++) {
			double events_i = *events_mean - events_from_time(times[i]);
			*events_stddev += events_i * events_i / (n - 1);
		}
		*events_stddev = sqrt(*events_stddev);
	}

	return n;
}

static void hashmap_report_final(struct bench_res res[], int res_cnt)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	double events_mean, events_stddev;
	u64 mean_time;
	int i, n;

	for (i = 0; i < nr_cpus; i++) {
		n = compute_events(ctx.skel->bss->percpu_times[i], &events_mean,
				   &events_stddev, &mean_time);
		if (n == 0)
			continue;

		if (env.quiet) {
			/* we expect only one cpu to be present */
			if (env.affinity)
				printf("%.3lf\n", events_mean);
			else
				printf("cpu%02d %.3lf\n", i, events_mean);
		} else {
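			/* report mean ± two standard deviations (~95% of samples) */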
			printf("cpu%02d: lookup %.3lfM ± %.3lfM events/sec"
			       " (approximated from %d samples of ~%lums)\n",
			       i, events_mean, 2 * events_stddev,
			       n, mean_time / 1000000);
		}
	}
}

const struct bench bench_bpf_hashmap_lookup = {
	.name = "bpf-hashmap-lookup",
	.argp = &bench_hashmap_lookup_argp,
	.validate = validate,
	.setup = setup,
	.producer_thread = producer,
	.measure = measure,
	.report_progress = NULL,
	.report_final = hashmap_report_final,
};