// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
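/* Recursion test for BPF task local storage: the fentry programs below
 * hook the local storage internals and call the task storage helpers
 * again from inside them.  The prog->active recursion counter and the
 * percpu busy-counter deadlock detection are expected to cut the nesting
 * short rather than let it loop or deadlock.
 */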

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* vmlinux.h carries types only, not the errno macros, so define the one
 * value checked below.
 */
#ifndef EBUSY
#define EBUSY 16
#endif

char _license[] SEC("license") = "GPL";
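/* test_pid is expected to be set by the userspace side of the test to the
 * tid under test; nr_del_errs counts the bpf_task_storage_delete() calls
 * in on_update that fail with -EBUSY.
 */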
int nr_del_errs = 0;
int test_pid = 0;
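
/* Two task local storage maps, so the programs below can exercise both a
 * map that already holds storage for the task (map_a) and one that does
 * not (map_b).
 */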

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} map_a SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} map_b SEC(".maps");

SEC("fentry/bpf_local_storage_lookup")
int BPF_PROG(on_lookup)
{
	struct task_struct *task = bpf_get_current_task_btf();

	if (!test_pid || task->pid != test_pid)
		return 0;

	/* The bpf_task_storage_delete() calls below will call
	 * bpf_local_storage_lookup() again.  The prog->active
	 * counter stops this program from recursing into itself.
	 */
	bpf_task_storage_delete(&map_a, task);
	bpf_task_storage_delete(&map_b, task);
	return 0;
}

SEC("fentry/bpf_local_storage_update")
int BPF_PROG(on_update)
{
	struct task_struct *task = bpf_get_current_task_btf();
	long *ptr;

	if (!test_pid || task->pid != test_pid)
		return 0;

	ptr = bpf_task_storage_get(&map_a, task, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	/* ptr will not be NULL when this prog is triggered by
	 * the bpf_task_storage_get(&map_b, ...F_CREATE) in
	 * BPF_PROG(on_enter) below: by then the value already
	 * exists in map_a, so the kernel can return it without
	 * acquiring any spin_lock.
	 */
	if (ptr) {
		int err;

		*ptr += 1;
		err = bpf_task_storage_delete(&map_a, task);
		if (err == -EBUSY)
			nr_del_errs++;
	}

	/* This will still fail: map_b has no storage for the
	 * task and this BPF_PROG(on_update) cannot take the
	 * percpu busy lock (it is already held by the caller),
	 * so a potential deadlock is detected and no new
	 * storage is created.
	 */
	ptr = bpf_task_storage_get(&map_b, task, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (ptr)
		*ptr += 1;

	return 0;
}

SEC("tp_btf/sys_enter")
int BPF_PROG(on_enter, struct pt_regs *regs, long id)
{
	struct task_struct *task;
	long *ptr;

	task = bpf_get_current_task_btf();
	if (!test_pid || task->pid != test_pid)
		return 0;

	ptr = bpf_task_storage_get(&map_a, task, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (ptr && !*ptr)
		*ptr = 200;

	ptr = bpf_task_storage_get(&map_b, task, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (ptr && !*ptr)
		*ptr = 100;
	return 0;
}
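
/* Not part of the BPF object: a minimal sketch (kept in a comment so this
 * file still builds as a BPF program) of how userspace might drive this
 * test.  It assumes a libbpf skeleton generated from this object as
 * task_ls_recursion.skel.h; the in-tree runner lives under
 * tools/testing/selftests/bpf/prog_tests/.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include "task_ls_recursion.skel.h"
 *
 *	int main(void)
 *	{
 *		struct task_ls_recursion *skel;
 *
 *		skel = task_ls_recursion__open_and_load();
 *		if (!skel)
 *			return 1;
 *		if (task_ls_recursion__attach(skel))
 *			goto out;
 *
 *		skel->bss->test_pid = getpid();	// enable the progs for this task
 *		usleep(1);			// any syscall fires tp_btf/sys_enter
 *		skel->bss->test_pid = 0;
 *
 *		// The nested deletes in on_update should have hit -EBUSY.
 *		printf("nr_del_errs=%d\n", skel->bss->nr_del_errs);
 *	out:
 *		task_ls_recursion__destroy(skel);
 *		return 0;
 *	}
 */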