author	Luke Nelson <luke.r.nels@gmail.com>	2019-05-30 15:29:22 -0700
committer	Alexei Starovoitov <ast@kernel.org>	2019-05-31 17:08:53 -0700
commit	1e692f09e091bf5c8b38384f297d6dae5dbf0f12 (patch)
tree	bd39ab619e1c156b8a8b6701527ba6b472dc3748 /arch
parent	libbpf: Return btf_fd for load_sk_storage_btf (diff)
bpf, riscv: clear high 32 bits for ALU32 add/sub/neg/lsh/rsh/arsh
In BPF, 32-bit ALU operations should zero-extend their results into the 64-bit registers. The current BPF JIT on RISC-V emits incorrect instructions that perform sign extension only (e.g., addw, subw) on 32-bit add, sub, lsh, rsh, arsh, and neg. This behavior diverges from the interpreter and from the JITs for other architectures. This patch fixes the bugs by performing zero extension on the destination register of 32-bit ALU operations.

Fixes: 2353ecc6f91f ("bpf, riscv: add BPF JIT for RV64G")
Cc: Xi Wang <xi.wang@gmail.com>
Signed-off-by: Luke Nelson <luke.r.nels@gmail.com>
Acked-by: Song Liu <songliubraving@fb.com>
Acked-by: Björn Töpel <bjorn.topel@gmail.com>
Reviewed-by: Palmer Dabbelt <palmer@sifive.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
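For context, zero-extending a register on RV64 is commonly done with a logical shift left by 32 followed by a logical shift right by 32. The emit_zext_32() helper used in the hunks below presumably emits such a pair; what follows is a minimal sketch under that assumption, reusing the rv_slli()/rv_srli() encoders already present in bpf_jit_comp.c, not necessarily the in-tree definition:

/* Sketch: zero-extend the low 32 bits of @reg on RV64 (assumed helper body). */
static void emit_zext_32(u8 reg, struct rv_jit_context *ctx)
{
	emit(rv_slli(reg, reg, 32), ctx);	/* shift the low word up, dropping bits 63:32 */
	emit(rv_srli(reg, reg, 32), ctx);	/* logical shift back down, refilling 63:32 with zeroes */
}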
Diffstat (limited to 'arch')
-rw-r--r--	arch/riscv/net/bpf_jit_comp.c | 18
1 file changed, 18 insertions, 0 deletions
diff --git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp.c
index e5c8d675bd6e..426d5c33ea90 100644
--- a/arch/riscv/net/bpf_jit_comp.c
+++ b/arch/riscv/net/bpf_jit_comp.c
@@ -751,10 +751,14 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 	case BPF_ALU | BPF_ADD | BPF_X:
 	case BPF_ALU64 | BPF_ADD | BPF_X:
 		emit(is64 ? rv_add(rd, rd, rs) : rv_addw(rd, rd, rs), ctx);
+		if (!is64)
+			emit_zext_32(rd, ctx);
 		break;
 	case BPF_ALU | BPF_SUB | BPF_X:
 	case BPF_ALU64 | BPF_SUB | BPF_X:
 		emit(is64 ? rv_sub(rd, rd, rs) : rv_subw(rd, rd, rs), ctx);
+		if (!is64)
+			emit_zext_32(rd, ctx);
 		break;
 	case BPF_ALU | BPF_AND | BPF_X:
 	case BPF_ALU64 | BPF_AND | BPF_X:
@@ -795,14 +799,20 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 	case BPF_ALU | BPF_LSH | BPF_X:
 	case BPF_ALU64 | BPF_LSH | BPF_X:
 		emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
+		if (!is64)
+			emit_zext_32(rd, ctx);
 		break;
 	case BPF_ALU | BPF_RSH | BPF_X:
 	case BPF_ALU64 | BPF_RSH | BPF_X:
 		emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx);
+		if (!is64)
+			emit_zext_32(rd, ctx);
 		break;
 	case BPF_ALU | BPF_ARSH | BPF_X:
 	case BPF_ALU64 | BPF_ARSH | BPF_X:
 		emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx);
+		if (!is64)
+			emit_zext_32(rd, ctx);
 		break;
 
 	/* dst = -dst */
@@ -810,6 +820,8 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 	case BPF_ALU64 | BPF_NEG:
 		emit(is64 ? rv_sub(rd, RV_REG_ZERO, rd) :
 		     rv_subw(rd, RV_REG_ZERO, rd), ctx);
+		if (!is64)
+			emit_zext_32(rd, ctx);
 		break;
 
 	/* dst = BSWAP##imm(dst) */
@@ -964,14 +976,20 @@ out_be:
 	case BPF_ALU | BPF_LSH | BPF_K:
 	case BPF_ALU64 | BPF_LSH | BPF_K:
 		emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx);
+		if (!is64)
+			emit_zext_32(rd, ctx);
 		break;
 	case BPF_ALU | BPF_RSH | BPF_K:
 	case BPF_ALU64 | BPF_RSH | BPF_K:
 		emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx);
+		if (!is64)
+			emit_zext_32(rd, ctx);
 		break;
 	case BPF_ALU | BPF_ARSH | BPF_K:
 	case BPF_ALU64 | BPF_ARSH | BPF_K:
 		emit(is64 ? rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx);
+		if (!is64)
+			emit_zext_32(rd, ctx);
 		break;
 
 	/* JUMP off */
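To make the semantic difference concrete, here is a hypothetical BPF snippet (built from the BPF_* instruction macros in include/linux/filter.h) where sign extension and zero extension of a 32-bit result diverge; it exercises the BPF_ALU | BPF_LSH | BPF_K path patched above:

/* Hypothetical test program; macros come from include/linux/filter.h. */
struct bpf_insn prog[] = {
	BPF_MOV64_IMM(BPF_REG_0, 1),		/* r0 = 1 */
	BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 31),	/* w0 <<= 31 */
	/*
	 * BPF semantics (and the interpreter): r0 == 0x0000000080000000.
	 * The old JIT emitted only rv_slliw, whose 32-bit result is
	 * sign-extended, leaving r0 == 0xffffffff80000000.
	 */
	BPF_EXIT_INSN(),
};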