tools/lib/bpf/libbpf_probes.c
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Netronome Systems, Inc. */

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/utsname.h>

#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/kernel.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

static bool grep(const char *buffer, const char *pattern)
{
	return !!strstr(buffer, pattern);
}

static int get_vendor_id(int ifindex)
{
	char ifname[IF_NAMESIZE], path[64], buf[8];
	ssize_t len;
	int fd;

	if (!if_indextoname(ifindex, ifname))
		return -1;

	snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);
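	/* the "vendor" attribute holds the PCI vendor ID as a hex string such
	 * as "0x19ee\n"; strtol() with base 0 below handles the "0x" prefix
	 */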

	fd = open(path, O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return -1;

	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len < 0)
		return -1;
	if (len >= (ssize_t)sizeof(buf))
		return -1;
	buf[len] = '\0';

	return strtol(buf, NULL, 0);
}

static int probe_prog_load(enum bpf_prog_type prog_type,
			   const struct bpf_insn *insns, size_t insns_cnt,
			   char *log_buf, size_t log_buf_sz,
			   __u32 ifindex)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = log_buf,
		.log_size = log_buf_sz,
		.log_level = log_buf ? 1 : 0,
		.prog_ifindex = ifindex,
	);
	int fd, err, exp_err = 0;
	const char *exp_msg = NULL;
	char buf[4096];

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		opts.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
		break;
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		opts.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
		break;
	case BPF_PROG_TYPE_SK_LOOKUP:
		opts.expected_attach_type = BPF_SK_LOOKUP;
		break;
	case BPF_PROG_TYPE_KPROBE:
		opts.kern_version = get_kernel_version();
		break;
	case BPF_PROG_TYPE_LIRC_MODE2:
		opts.expected_attach_type = BPF_LIRC_MODE2;
		break;
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_LSM:
		opts.log_buf = buf;
		opts.log_size = sizeof(buf);
		opts.log_level = 1;
		if (prog_type == BPF_PROG_TYPE_TRACING)
			opts.expected_attach_type = BPF_TRACE_FENTRY;
		else
			opts.expected_attach_type = BPF_MODIFY_RETURN;
		opts.attach_btf_id = 1;

		exp_err = -EINVAL;
		exp_msg = "attach_btf_id 1 is not a function";
		break;
	case BPF_PROG_TYPE_EXT:
		opts.log_buf = buf;
		opts.log_size = sizeof(buf);
		opts.log_level = 1;
		opts.attach_btf_id = 1;

		exp_err = -EINVAL;
		exp_msg = "Cannot replace kernel functions";
		break;
	case BPF_PROG_TYPE_SYSCALL:
		opts.prog_flags = BPF_F_SLEEPABLE;
		break;
	case BPF_PROG_TYPE_STRUCT_OPS:
		exp_err = -524; /* -ENOTSUPP */
		break;
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
		break;
	default:
		return -EOPNOTSUPP;
	}

	fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts);
	err = -errno;
	if (fd >= 0)
		close(fd);
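	/* For prog types that cannot be truly loaded from a probe, "support"
	 * means failing in the specific expected way: a matching errno
	 * (exp_err) and, optionally, a known verifier log message (exp_msg).
	 */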
	if (exp_err) {
		if (fd >= 0 || err != exp_err)
			return 0;
		if (exp_msg && !strstr(buf, exp_msg))
			return 0;
		return 1;
	}
	return fd >= 0 ? 1 : 0;
}

int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};
	const size_t insn_cnt = ARRAY_SIZE(insns);
	int ret;

	if (opts)
		return libbpf_err(-EINVAL);

	ret = probe_prog_load(prog_type, insns, insn_cnt, NULL, 0, 0);
	return libbpf_err(ret);
}
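
/* Illustrative usage (not part of libbpf): a caller can feature-detect a
 * program type before deciding how to attach; the helper names below are
 * hypothetical:
 *
 *	if (libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_XDP, NULL) == 1)
 *		setup_xdp_datapath();
 *	else
 *		setup_tc_fallback();
 *
 * A return of 1 means the kernel accepted a trivial program of that type,
 * 0 means it did not, and a negative value is an error from the probe
 * itself.
 */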

bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};

	/* prefer libbpf_probe_bpf_prog_type() unless offload is requested */
	if (ifindex == 0)
		return libbpf_probe_bpf_prog_type(prog_type, NULL) == 1;

	if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
		/* nfp returns -EINVAL on exit(0) with TC offload */
		insns[0].imm = 2;

	errno = 0;
	probe_prog_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);

	return errno != EINVAL && errno != EOPNOTSUPP;
}

int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = types_len,
		.str_off = types_len,
		.str_len = str_len,
	};
	int btf_fd, btf_len;
	__u8 *raw_btf;

	btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len;
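	/* the blob is the header immediately followed by the type section and
	 * then the string section; hdr.str_off is relative to the end of the
	 * header, so the strings start right after the types
	 */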
	raw_btf = malloc(btf_len);
	if (!raw_btf)
		return -ENOMEM;

	memcpy(raw_btf, &hdr, sizeof(hdr));
	memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
	memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);

	btf_fd = bpf_btf_load(raw_btf, btf_len, NULL);

	free(raw_btf);
	return btf_fd;
}

static int load_local_storage_btf(void)
{
	const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
	/* struct bpf_spin_lock {
	 *   int val;
	 * };
	 * struct val {
	 *   int cnt;
	 *   struct bpf_spin_lock l;
	 * };
	 */
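	/* name offsets into strs[] above: 1 = "bpf_spin_lock", 15 = "val",
	 * 19 = "cnt", 23 = "l"; these are the name_off values used in the
	 * type and member encodings below
	 */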
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* struct bpf_spin_lock */                      /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
		BTF_MEMBER_ENC(15, 1, 0), /* int val; */
		/* struct val */                                /* [3] */
		BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
		BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
		BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
	};

	return libbpf__load_raw_btf((char *)types, sizeof(types),
				     strs, sizeof(strs));
}

static int probe_map_create(enum bpf_map_type map_type, __u32 ifindex)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int key_size, value_size, max_entries;
	__u32 btf_key_type_id = 0, btf_value_type_id = 0;
	int fd = -1, btf_fd = -1, fd_inner = -1, exp_err = 0, err;

	opts.map_ifindex = ifindex;

	key_size	= sizeof(__u32);
	value_size	= sizeof(__u32);
	max_entries	= 1;

	switch (map_type) {
	case BPF_MAP_TYPE_STACK_TRACE:
		value_size	= sizeof(__u64);
		break;
	case BPF_MAP_TYPE_LPM_TRIE:
		key_size	= sizeof(__u64);
		value_size	= sizeof(__u64);
		opts.map_flags	= BPF_F_NO_PREALLOC;
		break;
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		key_size	= sizeof(struct bpf_cgroup_storage_key);
		value_size	= sizeof(__u64);
		max_entries	= 0;
		break;
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
		key_size	= 0;
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
	case BPF_MAP_TYPE_INODE_STORAGE:
	case BPF_MAP_TYPE_TASK_STORAGE:
		btf_key_type_id = 1;
		btf_value_type_id = 3;
		value_size = 8;
		max_entries = 0;
		opts.map_flags = BPF_F_NO_PREALLOC;
		btf_fd = load_local_storage_btf();
		if (btf_fd < 0)
			return btf_fd;
		break;
	case BPF_MAP_TYPE_RINGBUF:
		key_size = 0;
		value_size = 0;
		max_entries = 4096;
		break;
	case BPF_MAP_TYPE_STRUCT_OPS:
		/* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */
		opts.btf_vmlinux_value_type_id = 1;
		exp_err = -524; /* -ENOTSUPP */
		break;
	case BPF_MAP_TYPE_BLOOM_FILTER:
		key_size = 0;
		max_entries = 1;
		break;
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
		break;
	case BPF_MAP_TYPE_UNSPEC:
	default:
		return -EOPNOTSUPP;
	}

	if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		/* TODO: probe for device, once libbpf has a function to create
		 * map-in-map for offload
		 */
		if (ifindex)
			goto cleanup;

		fd_inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
					  sizeof(__u32), sizeof(__u32), 1, NULL);
		if (fd_inner < 0)
			goto cleanup;

		opts.inner_map_fd = fd_inner;
	}

	if (btf_fd >= 0) {
		opts.btf_fd = btf_fd;
		opts.btf_key_type_id = btf_key_type_id;
		opts.btf_value_type_id = btf_value_type_id;
	}

	fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts);
	err = -errno;

cleanup:
	if (fd >= 0)
		close(fd);
	if (fd_inner >= 0)
		close(fd_inner);
	if (btf_fd >= 0)
		close(btf_fd);

	if (exp_err)
		return fd < 0 && err == exp_err ? 1 : 0;
	else
		return fd >= 0 ? 1 : 0;
}

int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts)
{
	int ret;

	if (opts)
		return libbpf_err(-EINVAL);

	ret = probe_map_create(map_type, 0);
	return libbpf_err(ret);
}
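
/* Illustrative usage (not part of libbpf): probe for a newer map type and
 * fall back when it is unavailable, e.g.:
 *
 *	if (libbpf_probe_bpf_map_type(BPF_MAP_TYPE_RINGBUF, NULL) == 1)
 *		map_type = BPF_MAP_TYPE_RINGBUF;
 *	else
 *		map_type = BPF_MAP_TYPE_PERF_EVENT_ARRAY;
 */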

bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
{
	return probe_map_create(map_type, ifindex) == 1;
}

int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helper_id,
			    const void *opts)
{
	struct bpf_insn insns[] = {
		BPF_EMIT_CALL((__u32)helper_id),
		BPF_EXIT_INSN(),
	};
	const size_t insn_cnt = ARRAY_SIZE(insns);
	char buf[4096];
	int ret;

	if (opts)
		return libbpf_err(-EINVAL);

	/* we can't successfully load all prog types to check for BPF helper
	 * support, so bail out with -EOPNOTSUPP error
	 */
	switch (prog_type) {
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_EXT:
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_STRUCT_OPS:
		return -EOPNOTSUPP;
	default:
		break;
	}

	buf[0] = '\0';
	ret = probe_prog_load(prog_type, insns, insn_cnt, buf, sizeof(buf), 0);
	if (ret < 0)
		return libbpf_err(ret);

	/* If the BPF verifier doesn't recognize the BPF helper ID (enum
	 * bpf_func_id) at all, it will emit something like
	 * "invalid func unknown#181".
	 * If the verifier recognizes the helper but it's not supported for
	 * the given BPF program type, it will emit
	 * "unknown func bpf_sys_bpf#166".
	 * In both cases, the provided combination of BPF program type and
	 * BPF helper is not supported by the kernel.
	 * In all other cases, probe_prog_load() above will either succeed
	 * (e.g., because the helper happens to accept no input arguments, or
	 * it accepts one input argument and the initial PTR_TO_CTX is fine
	 * for that), or we'll get some more specific verifier error about
	 * unsatisfied conditions.
	 */
	if (ret == 0 && (strstr(buf, "invalid func ") || strstr(buf, "unknown func ")))
		return 0;
	return 1; /* assume supported */
}
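
/* Illustrative usage (not part of libbpf): check whether a helper is usable
 * from a given program type before generating code that calls it, e.g.:
 *
 *	if (libbpf_probe_bpf_helper(BPF_PROG_TYPE_XDP,
 *				    BPF_FUNC_ringbuf_output, NULL) == 1)
 *		use_ringbuf_output();	// hypothetical caller helper
 */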

bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
		      __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_EMIT_CALL(id),
		BPF_EXIT_INSN()
	};
	char buf[4096] = {};
	bool res;

	probe_prog_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf), ifindex);
	res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");

	if (ifindex) {
		switch (get_vendor_id(ifindex)) {
		case 0x19ee: /* Netronome specific */
			res = res && !grep(buf, "not supported by FW") &&
				!grep(buf, "unsupported function id");
			break;
		default:
			break;
		}
	}

	return res;
}

/*
 * Probe for availability of kernel commit (5.3):
 *
 * c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
 */
bool bpf_probe_large_insn_limit(__u32 ifindex)
{
	struct bpf_insn insns[BPF_MAXINSNS + 1];
	int i;

	for (i = 0; i < BPF_MAXINSNS; i++)
		insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
	insns[BPF_MAXINSNS] = BPF_EXIT_INSN();

	errno = 0;
	probe_prog_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0,
			ifindex);

	return errno != E2BIG && errno != EINVAL;
}
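
/* Note: the probe above builds BPF_MAXINSNS + 1 (4097) instructions. Kernels
 * without commit c04c0d2b968a are expected to reject anything larger than
 * BPF_MAXINSNS with E2BIG (or EINVAL for offloaded programs), while kernels
 * with the commit accept much larger programs (up to 1 million instructions
 * for privileged loaders), so the absence of E2BIG/EINVAL indicates the
 * larger limit is present.
 */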