author    Luke Nelson <lukenels@cs.washington.edu>    2020-04-18 16:26:54 -0700
committer Alexei Starovoitov <ast@kernel.org>         2020-04-20 19:25:30 -0700
commit    d2b6c3ab70dbc0069a69c57edd8c96f365f06b7c (patch)
tree      f8f4cf01b82216e46db29d51c0d0d86d23a5f34c /tools
parent    bpf, x86: Fix encoding for lower 8-bit registers in BPF_STX BPF_B (diff)
bpf, selftests: Add test for BPF_STX BPF_B storing R10
This patch adds a test to test_verifier that writes the lower 8 bits of
R10 (aka FP) using BPF_B to an array map and reads the result back. The
expected behavior is that the result should be the same as first copying
R10 to R9, and then storing / loading the lower 8 bits of R9.

This test catches a bug that was present in the x86-64 JIT that caused
an incorrect encoding for BPF_STX BPF_B when the source operand is R10.

Signed-off-by: Xi Wang <xi.wang@gmail.com>
Signed-off-by: Luke Nelson <luke.r.nels@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200418232655.23870-2-luke.r.nels@gmail.com
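For readers unfamiliar with the x86-64 quirk behind the JIT bug: in a
byte-sized operation without a REX prefix, ModRM register encodings 4-7
select the legacy high-byte registers AH/CH/DH/BH rather than
SPL/BPL/SIL/DIL. The x86-64 JIT homes BPF R10 (the frame pointer) in RBP
(encoding 5), so a byte store from R10 emitted without REX silently
encodes CH instead of BPL. The sketch below illustrates the predicate
involved; it is not code from this patch, and the BPF-to-x86 register
mapping shown is an assumption based on the upstream JIT.

/*
 * Illustrative sketch only; not code from this patch. Shows which BPF
 * source registers need a REX prefix for an 8-bit store on x86-64.
 */
#include <stdbool.h>
#include <stdio.h>

/* Assumed BPF-to-x86-64 register encodings (subset), per the upstream JIT. */
static const int bpf2x86[11] = {
	[0]  = 0,  /* R0  -> rax */
	[1]  = 7,  /* R1  -> rdi */
	[2]  = 6,  /* R2  -> rsi */
	[9]  = 15, /* R9  -> r15 (extended reg, always needs REX) */
	[10] = 5,  /* R10 -> rbp (the case this test exercises)   */
};

/* Low-8-bit access to encodings >= 4 (spl/bpl/sil/dil, r8b-r15b) needs REX. */
static bool rex_needed_for_8bit(int enc)
{
	return enc >= 4;
}

int main(void)
{
	const int regs[] = { 0, 1, 2, 9, 10 };

	for (unsigned int i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		printf("R%-2d -> x86 enc %2d, REX for 8-bit store: %s\n",
		       regs[i], bpf2x86[regs[i]],
		       rex_needed_for_8bit(bpf2x86[regs[i]]) ? "yes" : "no");
	return 0;
}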
Diffstat (limited to 'tools')
tools/testing/selftests/bpf/verifier/stack_ptr.c | 40 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 40 insertions(+), 0 deletions(-)
diff --git a/tools/testing/selftests/bpf/verifier/stack_ptr.c b/tools/testing/selftests/bpf/verifier/stack_ptr.c
index 7276620ef242..8bfeb77c60bd 100644
--- a/tools/testing/selftests/bpf/verifier/stack_ptr.c
+++ b/tools/testing/selftests/bpf/verifier/stack_ptr.c
@@ -315,3 +315,43 @@
},
.result = ACCEPT,
},
+{
+ "store PTR_TO_STACK in R10 to array map using BPF_B",
+ .insns = {
+ /* Load pointer to map. */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ /* Copy R10 to R9. */
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_10),
+ /* Pollute other registers with unaligned values. */
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_3, -1),
+ BPF_MOV64_IMM(BPF_REG_4, -1),
+ BPF_MOV64_IMM(BPF_REG_5, -1),
+ BPF_MOV64_IMM(BPF_REG_6, -1),
+ BPF_MOV64_IMM(BPF_REG_7, -1),
+ BPF_MOV64_IMM(BPF_REG_8, -1),
+ /* Store both R9 and R10 with BPF_B and read back. */
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_9, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_1, 0),
+ /* Should read back as same value. */
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 42,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
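As a sanity model of what the selftest asserts (a hypothetical userspace
sketch, not the selftest itself): storing the low 8 bits of the same
64-bit value through two different registers must read back identically.
The BPF program above returns 42 only when the R10 store matches the R9
store; a mismatch exits with 1 and fails the .retval = 42 expectation.

/* Userspace analogue of the selftest's check; for illustration only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t fp = 0x7ffc0123456789abULL; /* stands in for R10 (FP) */
	uint64_t r9 = fp;                    /* "Copy R10 to R9."      */
	uint8_t cell;                        /* one byte of the array map value */

	cell = (uint8_t)fp;                  /* BPF_STX BPF_B from "R10" */
	uint8_t via_fp = cell;               /* BPF_LDX BPF_B read-back  */

	cell = (uint8_t)r9;                  /* BPF_STX BPF_B from "R9"  */
	uint8_t via_r9 = cell;

	assert(via_fp == via_r9);            /* the BPF_JEQ that gates retval 42 */
	printf("low byte via R10 = 0x%02x, via R9 = 0x%02x\n", via_fp, via_r9);
	return 0;
}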