author     Daniel Borkmann <daniel@iogearbox.net>  2019-11-22 21:07:54 +0100
committer  Alexei Starovoitov <ast@kernel.org>     2019-11-24 16:58:47 -0800
commit     4b3da77b72ad6b3c48c6fe4a395ace7db39a12c5 (patch)
tree       3c65c1076f917574a67c3ee1c4de18364549bebc /arch/x86/net
parent     selftests/bpf: Add BPF trampoline performance test (diff)
bpf, x86: Generalize and extend bpf_arch_text_poke for direct jumps
Add BPF_MOD_{NOP_TO_JUMP,JUMP_TO_JUMP,JUMP_TO_NOP} patching for the x86 JIT
so that direct jumps can be patched in or nopped out. We need this facility
in order to patch tail call jumps, and in later work also BPF static keys.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Link: https://lore.kernel.org/bpf/aa4784196a8e5e985af4b30a4fe5336bce6e9643.1574452833.git.daniel@iogearbox.net
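For reference, both instruction forms patched here share the same 5-byte
layout: one opcode byte (0xE8 for call rel32, 0xE9 for jmp rel32) followed by
a 32-bit displacement measured from the end of the instruction. Below is a
minimal standalone sketch of that encoding, mirroring what emit_patch() in
the diff produces; the encode_rel32 helper is illustrative only and not part
of the kernel:

/* Illustrative sketch, not kernel code: build the 5-byte call/jmp rel32
 * instruction that emit_patch() emits.
 */
#include <stdint.h>
#include <string.h>

#define X86_PATCH_SIZE 5	/* 1 opcode byte + 4 displacement bytes */

static int encode_rel32(uint8_t insn[X86_PATCH_SIZE], uint8_t opcode,
			void *ip, void *target)
{
	/* Displacement is relative to the byte after the instruction. */
	int64_t offset = (uint8_t *)target - ((uint8_t *)ip + X86_PATCH_SIZE);
	int32_t rel32 = (int32_t)offset;

	if (offset != rel32)
		return -1;	/* target out of rel32 range, cf. is_simm32() */

	insn[0] = opcode;	/* 0xE8 = call rel32, 0xE9 = jmp rel32 */
	memcpy(&insn[1], &rel32, sizeof(rel32));	/* x86 is little-endian */
	return 0;
}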
Diffstat (limited to 'arch/x86/net')
-rw-r--r--  arch/x86/net/bpf_jit_comp.c  64
1 file changed, 46 insertions(+), 18 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 2e586f579945..f438bd3b7689 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -203,8 +203,9 @@ struct jit_context {
/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE 128
#define BPF_INSN_SAFETY 64
-/* number of bytes emit_call() needs to generate call instruction */
-#define X86_CALL_SIZE 5
+
+/* Number of bytes emit_patch() needs to generate instructions */
+#define X86_PATCH_SIZE 5
#define PROLOGUE_SIZE 25
@@ -215,7 +216,7 @@ struct jit_context {
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
{
u8 *prog = *pprog;
- int cnt = X86_CALL_SIZE;
+ int cnt = X86_PATCH_SIZE;
/* BPF trampoline can be made to work without these nops,
* but let's waste 5 bytes for now and optimize later
@@ -480,64 +481,91 @@ static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
*pprog = prog;
}
-static int emit_call(u8 **pprog, void *func, void *ip)
+static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
u8 *prog = *pprog;
int cnt = 0;
s64 offset;
- offset = func - (ip + X86_CALL_SIZE);
+ offset = func - (ip + X86_PATCH_SIZE);
if (!is_simm32(offset)) {
pr_err("Target call %p is out of range\n", func);
return -EINVAL;
}
- EMIT1_off32(0xE8, offset);
+ EMIT1_off32(opcode, offset);
*pprog = prog;
return 0;
}
+static int emit_call(u8 **pprog, void *func, void *ip)
+{
+ return emit_patch(pprog, func, ip, 0xE8);
+}
+
+static int emit_jump(u8 **pprog, void *func, void *ip)
+{
+ return emit_patch(pprog, func, ip, 0xE9);
+}
+
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *old_addr, void *new_addr)
{
- u8 old_insn[X86_CALL_SIZE] = {};
- u8 new_insn[X86_CALL_SIZE] = {};
+ int (*emit_patch_fn)(u8 **pprog, void *func, void *ip);
+ u8 old_insn[X86_PATCH_SIZE] = {};
+ u8 new_insn[X86_PATCH_SIZE] = {};
u8 *prog;
int ret;
if (!is_kernel_text((long)ip) &&
!is_bpf_text_address((long)ip))
- /* BPF trampoline in modules is not supported */
+ /* BPF poking in modules is not supported */
return -EINVAL;
+ switch (t) {
+ case BPF_MOD_NOP_TO_CALL ... BPF_MOD_CALL_TO_NOP:
+ emit_patch_fn = emit_call;
+ break;
+ case BPF_MOD_NOP_TO_JUMP ... BPF_MOD_JUMP_TO_NOP:
+ emit_patch_fn = emit_jump;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
if (old_addr) {
prog = old_insn;
- ret = emit_call(&prog, old_addr, (void *)ip);
+ ret = emit_patch_fn(&prog, old_addr, (void *)ip);
if (ret)
return ret;
}
if (new_addr) {
prog = new_insn;
- ret = emit_call(&prog, new_addr, (void *)ip);
+ ret = emit_patch_fn(&prog, new_addr, (void *)ip);
if (ret)
return ret;
}
+
ret = -EBUSY;
mutex_lock(&text_mutex);
switch (t) {
case BPF_MOD_NOP_TO_CALL:
- if (memcmp(ip, ideal_nops[NOP_ATOMIC5], X86_CALL_SIZE))
+ case BPF_MOD_NOP_TO_JUMP:
+ if (memcmp(ip, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE))
goto out;
- text_poke_bp(ip, new_insn, X86_CALL_SIZE, NULL);
+ text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
break;
case BPF_MOD_CALL_TO_CALL:
- if (memcmp(ip, old_insn, X86_CALL_SIZE))
+ case BPF_MOD_JUMP_TO_JUMP:
+ if (memcmp(ip, old_insn, X86_PATCH_SIZE))
goto out;
- text_poke_bp(ip, new_insn, X86_CALL_SIZE, NULL);
+ text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
break;
case BPF_MOD_CALL_TO_NOP:
- if (memcmp(ip, old_insn, X86_CALL_SIZE))
+ case BPF_MOD_JUMP_TO_NOP:
+ if (memcmp(ip, old_insn, X86_PATCH_SIZE))
goto out;
- text_poke_bp(ip, ideal_nops[NOP_ATOMIC5], X86_CALL_SIZE, NULL);
+ text_poke_bp(ip, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE,
+ NULL);
break;
}
ret = 0;
@@ -1394,7 +1422,7 @@ int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags
/* skip patched call instruction and point orig_call to actual
* body of the kernel function.
*/
- orig_call += X86_CALL_SIZE;
+ orig_call += X86_PATCH_SIZE;
prog = image;
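
To see how the new jump modes compose with the old_addr/new_addr contract of
bpf_arch_text_poke() above, here is a hypothetical caller; poke_jump_site()
is not part of this patch, and the BPF_MOD_* jump enum values come from the
companion include/linux/bpf.h change, which the diffstat filter hides here:

/* Hypothetical caller, not part of this patch: drive the three jump
 * transitions that bpf_arch_text_poke() now supports. ip must point at
 * a 5-byte nop or direct jump inside kernel or BPF text.
 */
static int poke_jump_site(void *ip, void *old_target, void *new_target)
{
	if (old_target && new_target)	/* retarget an existing jump */
		return bpf_arch_text_poke(ip, BPF_MOD_JUMP_TO_JUMP,
					  old_target, new_target);
	if (new_target)			/* turn the nop into a jump */
		return bpf_arch_text_poke(ip, BPF_MOD_NOP_TO_JUMP,
					  NULL, new_target);
	/* turn the jump back into a nop */
	return bpf_arch_text_poke(ip, BPF_MOD_JUMP_TO_NOP,
				  old_target, NULL);
}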