-rw-r--r--  kernel/bpf/verifier.c                        64
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c  82
2 files changed, 127 insertions, 19 deletions
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8f69df7e8167..fb3513b35c0b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1566,22 +1566,54 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
struct bpf_reg_state *src_reg = &regs[insn->src_reg];
u8 opcode = BPF_OP(insn->code);
+ u64 dst_imm = dst_reg->imm;
- /* dst_reg->type == CONST_IMM here, simulate execution of 'add'/'or'
- * insn. Don't care about overflow or negative values, just add them
+ /* dst_reg->type == CONST_IMM here. Simulate execution of insns
+ * containing ALU ops. Don't care about overflow or negative
+ * values, just add/sub/... them; registers are in u64.
*/
- if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K)
- dst_reg->imm += insn->imm;
- else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
- src_reg->type == CONST_IMM)
- dst_reg->imm += src_reg->imm;
- else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K)
- dst_reg->imm |= insn->imm;
- else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X &&
- src_reg->type == CONST_IMM)
- dst_reg->imm |= src_reg->imm;
- else
+ if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K) {
+ dst_imm += insn->imm;
+ } else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM) {
+ dst_imm += src_reg->imm;
+ } else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_K) {
+ dst_imm -= insn->imm;
+ } else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM) {
+ dst_imm -= src_reg->imm;
+ } else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_K) {
+ dst_imm *= insn->imm;
+ } else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM) {
+ dst_imm *= src_reg->imm;
+ } else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K) {
+ dst_imm |= insn->imm;
+ } else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM) {
+ dst_imm |= src_reg->imm;
+ } else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_K) {
+ dst_imm &= insn->imm;
+ } else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM) {
+ dst_imm &= src_reg->imm;
+ } else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_K) {
+ dst_imm >>= insn->imm;
+ } else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM) {
+ dst_imm >>= src_reg->imm;
+ } else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_K) {
+ dst_imm <<= insn->imm;
+ } else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM) {
+ dst_imm <<= src_reg->imm;
+ } else {
mark_reg_unknown_value(regs, insn->dst_reg);
+ goto out;
+ }
+
+ dst_reg->imm = dst_imm;
+out:
return 0;
}
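
The hunk above generalizes the verifier's constant tracking: where evaluate_reg_imm_alu() previously folded only ADD and OR on a CONST_IMM destination, it now also folds SUB, MUL, AND, RSH and LSH, accumulating into a local u64 and writing the result back only when the opcode was recognized; anything else still falls through to mark_reg_unknown_value(). The second operand is insn->imm for BPF_K forms and src_reg->imm for BPF_X forms, the latter only when the source register is itself CONST_IMM. A minimal user-space sketch of the folding rule (the enum, function name, and driver are illustrative, not kernel identifiers):

	#include <stdint.h>
	#include <stdio.h>

	enum alu_op { OP_ADD, OP_SUB, OP_MUL, OP_OR, OP_AND, OP_RSH, OP_LSH };

	/* Mirrors the if/else chain above: combine a known immediate with
	 * a second operand. Overflow and negative values are ignored on
	 * purpose, as in the verifier; everything is computed in u64.
	 */
	static uint64_t fold_const(uint64_t dst_imm, enum alu_op op, uint64_t src)
	{
		switch (op) {
		case OP_ADD: return dst_imm + src;
		case OP_SUB: return dst_imm - src;
		case OP_MUL: return dst_imm * src;
		case OP_OR:  return dst_imm | src;
		case OP_AND: return dst_imm & src;
		case OP_RSH: return dst_imm >> src;
		case OP_LSH: return dst_imm << src;
		}
		return 0; /* not reached for the ops above */
	}

	int main(void)
	{
		/* The sequence from test11 below: (144 + 23) >> 3 */
		uint64_t v = fold_const(fold_const(144, OP_ADD, 23), OP_RSH, 3);

		printf("%llu\n", (unsigned long long)v); /* 20 */
		return 0;
	}
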
@@ -2225,14 +2257,8 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
return err;
if (insn->src_reg == 0) {
- /* generic move 64-bit immediate into a register,
- * only analyzer needs to collect the ld_imm value.
- */
u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
- if (!env->analyzer_ops)
- return 0;
-
regs[insn->dst_reg].type = CONST_IMM;
regs[insn->dst_reg].imm = imm;
return 0;
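
In check_ld_imm(), the patch drops the early return behind env->analyzer_ops, so the verifier itself now records the 64-bit constant loaded by BPF_LD_IMM64 as CONST_IMM; without this, the folding added above would never see immediates that enter via the ld_imm64 pseudo instruction, which spreads the constant across two instruction slots. A standalone sketch of the recombination done by the retained line (the values are examples only):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int32_t lo = (int32_t)0xdeadbeef; /* insn->imm: low 32 bits        */
		int32_t hi = 0x1;                 /* (insn + 1)->imm: high 32 bits */

		/* The (uint32_t) cast matters: without it, a negative low
		 * half would sign-extend and corrupt the high word.
		 */
		uint64_t imm = ((uint64_t)hi << 32) | (uint32_t)lo;

		printf("0x%llx\n", (unsigned long long)imm); /* 0x1deadbeef */
		return 0;
	}
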
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 1aa73241c999..0d0912c7f03c 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -2326,6 +2326,84 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "direct packet access: test11 (shift, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 144),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
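
The shift test is accepted because the verifier can now evaluate R5 exactly: 144 + 23 = 167, and 167 >> 3 = 20, so R6 becomes a packet pointer at constant offset 20, inside the 22 bytes proven available by the JGT check above it.
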
+ {
+ "direct packet access: test12 (and, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 144),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
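
Same pattern with AND: 144 + 23 = 167 and 167 & 15 = 7, so the resulting packet pointer sits at constant offset 7, again within the verified 22-byte range.
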
+ {
+ "direct packet access: test13 (branches, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 14),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 24),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
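
Here R3 is constant on both paths: 24 when mark > 1, 14 otherwise. The verifier explores each branch separately, folding 24 + 23 = 47 and 47 & 15 = 15 on one path, 14 + 23 = 37 and 37 & 15 = 5 on the other; both offsets lie within the verified 22 bytes, so both paths are accepted.
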
+ {
"helper access to packet: test1, valid packet_ptr range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -4208,6 +4286,8 @@ static struct bpf_test tests[] = {
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
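
These last two hunks adapt existing "helper access to packet" tests that relied on R2 being unknown after the AND: with the folding above, 0 & 64 here (and 0 & 63 in the hunk below) would now evaluate to the constant 0, and the tests would no longer exercise the intended range checks. Spilling R2 to the stack and filling it back restores the original intent, since CONST_IMM is not among the verifier's spillable register types, so the reload yields an unknown value again; the second helper-access test receives the identical spill/fill fixup below.
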
@@ -4251,6 +4331,8 @@ static struct bpf_test tests[] = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 0),