tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <test_progs.h>

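/* linux/filter.h provides the BPF_MOV64_REG() and BPF_ATOMIC_OP()
 * insn-building macros used to match the inlined instructions below.
 */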
#include "linux/filter.h"
#include "kptr_xchg_inline.skel.h"

void test_kptr_xchg_inline(void)
{
	struct kptr_xchg_inline *skel;
	struct bpf_insn *insn = NULL;
	struct bpf_insn exp;
	unsigned int cnt;
	int err;

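	/* bpf_kptr_xchg() is currently inlined only when the JIT supports
	 * it: x86-64, arm64 and 64-bit RISC-V. Skip everywhere else.
	 */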
#if !(defined(__x86_64__) || defined(__aarch64__) || \
      (defined(__riscv) && __riscv_xlen == 64))
	test__skip();
	return;
#endif

	skel = kptr_xchg_inline__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_load"))
		return;

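	/* grab the xlated (post-verifier) instructions of the loaded
	 * program; the buffer is heap-allocated and freed by the caller
	 */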
	err = get_xlated_program(bpf_program__fd(skel->progs.kptr_xchg_inline), &insn, &cnt);
	if (!ASSERT_OK(err, "prog insn"))
		goto out;

	/* The original instructions are:
	 * r1 = map[id:xxx][0]+0
	 * r2 = 0
	 * call bpf_kptr_xchg#yyy
	 *
	 * call bpf_kptr_xchg#yyy will be inlined as:
	 * r0 = r2
	 * r0 = atomic64_xchg((u64 *)(r1 +0), r0)
	 */
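	/* the inlined pair is matched at insn[3] and insn[4] below, so the
	 * xlated program must contain more than 5 instructions
	 */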
	if (!ASSERT_GT(cnt, 5, "insn cnt"))
		goto out;

	exp = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
	if (!ASSERT_OK(memcmp(&insn[3], &exp, sizeof(exp)), "mov"))
		goto out;

	exp = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
	if (!ASSERT_OK(memcmp(&insn[4], &exp, sizeof(exp)), "xchg"))
		goto out;
out:
	free(insn);
	kptr_xchg_inline__destroy(skel);
}
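For context, the skeleton is generated from the companion BPF object
progs/kptr_xchg_inline.c, which is not shown on this page. A minimal
sketch of such a program follows, assuming a global __kptr slot holding
an allocated object; the struct bin_data name, the global variable and
the SEC("syscall") attachment are illustrative guesses, not a copy of
the actual test program.

// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

#include "bpf_experimental.h" /* bpf_obj_drop() kfunc declaration */

/* illustrative allocated-object type (assumption) */
struct bin_data {
	char blob[32];
};

/* global variables live in a hidden array map, which is why the xlated
 * dump starts with "r1 = map[id:xxx][0]+0"
 */
struct bin_data __kptr *ptr;

SEC("syscall")
int kptr_xchg_inline(void *ctx)
{
	struct bin_data *old;

	/* swap NULL into the kptr slot; on the JITs listed above the
	 * verifier inlines this call into
	 * "r0 = r2; r0 = atomic64_xchg((u64 *)(r1 +0), r0)"
	 */
	old = bpf_kptr_xchg(&ptr, NULL);
	if (old)
		bpf_obj_drop(old);

	return 0;
}

char _license[] SEC("license") = "GPL";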