Diffstat (limited to 'kernel/bpf')
-rw-r--r--  kernel/bpf/Makefile   |    2
-rw-r--r--  kernel/bpf/arraymap.c |   78
-rw-r--r--  kernel/bpf/cgroup.c   |   54
-rw-r--r--  kernel/bpf/core.c     |  441
-rw-r--r--  kernel/bpf/cpumap.c   |   33
-rw-r--r--  kernel/bpf/devmap.c   |    8
-rw-r--r--  kernel/bpf/disasm.c   |  107
-rw-r--r--  kernel/bpf/disasm.h   |   26
-rw-r--r--  kernel/bpf/hashtab.c  |  103
-rw-r--r--  kernel/bpf/inode.c    |   53
-rw-r--r--  kernel/bpf/lpm_trie.c |  112
-rw-r--r--  kernel/bpf/offload.c  |  430
-rw-r--r--  kernel/bpf/sockmap.c  | 1208
-rw-r--r--  kernel/bpf/stackmap.c |  291
-rw-r--r--  kernel/bpf/syscall.c  |  414
-rw-r--r--  kernel/bpf/verifier.c | 1534
16 files changed, 4062 insertions, 832 deletions
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index e691da0b3bab..a713fd23ec88 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -9,9 +9,11 @@ obj-$(CONFIG_BPF_SYSCALL) += devmap.o
obj-$(CONFIG_BPF_SYSCALL) += cpumap.o
obj-$(CONFIG_BPF_SYSCALL) += offload.o
ifeq ($(CONFIG_STREAM_PARSER),y)
+ifeq ($(CONFIG_INET),y)
obj-$(CONFIG_BPF_SYSCALL) += sockmap.o
endif
endif
+endif
ifeq ($(CONFIG_PERF_EVENTS),y)
obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
endif
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index ab94d304a634..14750e7c5ee4 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -26,8 +26,10 @@ static void bpf_array_free_percpu(struct bpf_array *array)
{
int i;
- for (i = 0; i < array->map.max_entries; i++)
+ for (i = 0; i < array->map.max_entries; i++) {
free_percpu(array->pptrs[i]);
+ cond_resched();
+ }
}
static int bpf_array_alloc_percpu(struct bpf_array *array)
@@ -43,33 +45,42 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
return -ENOMEM;
}
array->pptrs[i] = ptr;
+ cond_resched();
}
return 0;
}
/* Called from syscall */
-static struct bpf_map *array_map_alloc(union bpf_attr *attr)
+static int array_map_alloc_check(union bpf_attr *attr)
{
bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
int numa_node = bpf_map_attr_numa_node(attr);
- u32 elem_size, index_mask, max_entries;
- bool unpriv = !capable(CAP_SYS_ADMIN);
- struct bpf_array *array;
- u64 array_size, mask64;
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
attr->value_size == 0 ||
attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
(percpu && numa_node != NUMA_NO_NODE))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (attr->value_size > KMALLOC_MAX_SIZE)
/* if value_size is bigger, the user space won't be able to
* access the elements.
*/
- return ERR_PTR(-E2BIG);
+ return -E2BIG;
+
+ return 0;
+}
+
+static struct bpf_map *array_map_alloc(union bpf_attr *attr)
+{
+ bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
+ int ret, numa_node = bpf_map_attr_numa_node(attr);
+ u32 elem_size, index_mask, max_entries;
+ bool unpriv = !capable(CAP_SYS_ADMIN);
+ u64 cost, array_size, mask64;
+ struct bpf_array *array;
elem_size = round_up(attr->value_size, 8);
@@ -101,8 +112,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
array_size += (u64) max_entries * elem_size;
/* make sure there is no u32 overflow later in round_up() */
- if (array_size >= U32_MAX - PAGE_SIZE)
+ cost = array_size;
+ if (cost >= U32_MAX - PAGE_SIZE)
return ERR_PTR(-ENOMEM);
+ if (percpu) {
+ cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+ if (cost >= U32_MAX - PAGE_SIZE)
+ return ERR_PTR(-ENOMEM);
+ }
+ cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+ ret = bpf_map_precharge_memlock(cost);
+ if (ret < 0)
+ return ERR_PTR(ret);
/* allocate all map elements and zero-initialize them */
array = bpf_map_area_alloc(array_size, numa_node);
@@ -112,26 +134,14 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
array->map.unpriv_array = unpriv;
/* copy mandatory map attributes */
- array->map.map_type = attr->map_type;
- array->map.key_size = attr->key_size;
- array->map.value_size = attr->value_size;
- array->map.max_entries = attr->max_entries;
- array->map.map_flags = attr->map_flags;
- array->map.numa_node = numa_node;
+ bpf_map_init_from_attr(&array->map, attr);
+ array->map.pages = cost;
array->elem_size = elem_size;
- if (!percpu)
- goto out;
-
- array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
- if (array_size >= U32_MAX - PAGE_SIZE ||
- bpf_array_alloc_percpu(array)) {
+ if (percpu && bpf_array_alloc_percpu(array)) {
bpf_map_area_free(array);
return ERR_PTR(-ENOMEM);
}
-out:
- array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
return &array->map;
}
@@ -327,6 +337,7 @@ static void array_map_free(struct bpf_map *map)
}
const struct bpf_map_ops array_map_ops = {
+ .map_alloc_check = array_map_alloc_check,
.map_alloc = array_map_alloc,
.map_free = array_map_free,
.map_get_next_key = array_map_get_next_key,
@@ -337,6 +348,7 @@ const struct bpf_map_ops array_map_ops = {
};
const struct bpf_map_ops percpu_array_map_ops = {
+ .map_alloc_check = array_map_alloc_check,
.map_alloc = array_map_alloc,
.map_free = array_map_free,
.map_get_next_key = array_map_get_next_key,
@@ -345,12 +357,12 @@ const struct bpf_map_ops percpu_array_map_ops = {
.map_delete_elem = array_map_delete_elem,
};
-static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
+static int fd_array_map_alloc_check(union bpf_attr *attr)
{
/* only file descriptors can be stored in this type of map */
if (attr->value_size != sizeof(u32))
- return ERR_PTR(-EINVAL);
- return array_map_alloc(attr);
+ return -EINVAL;
+ return array_map_alloc_check(attr);
}
static void fd_array_map_free(struct bpf_map *map)
@@ -474,7 +486,8 @@ void bpf_fd_array_map_clear(struct bpf_map *map)
}
const struct bpf_map_ops prog_array_map_ops = {
- .map_alloc = fd_array_map_alloc,
+ .map_alloc_check = fd_array_map_alloc_check,
+ .map_alloc = array_map_alloc,
.map_free = fd_array_map_free,
.map_get_next_key = array_map_get_next_key,
.map_lookup_elem = fd_array_map_lookup_elem,
@@ -561,7 +574,8 @@ static void perf_event_fd_array_release(struct bpf_map *map,
}
const struct bpf_map_ops perf_event_array_map_ops = {
- .map_alloc = fd_array_map_alloc,
+ .map_alloc_check = fd_array_map_alloc_check,
+ .map_alloc = array_map_alloc,
.map_free = fd_array_map_free,
.map_get_next_key = array_map_get_next_key,
.map_lookup_elem = fd_array_map_lookup_elem,
@@ -592,7 +606,8 @@ static void cgroup_fd_array_free(struct bpf_map *map)
}
const struct bpf_map_ops cgroup_array_map_ops = {
- .map_alloc = fd_array_map_alloc,
+ .map_alloc_check = fd_array_map_alloc_check,
+ .map_alloc = array_map_alloc,
.map_free = cgroup_fd_array_free,
.map_get_next_key = array_map_get_next_key,
.map_lookup_elem = fd_array_map_lookup_elem,
@@ -610,7 +625,7 @@ static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
if (IS_ERR(inner_map_meta))
return inner_map_meta;
- map = fd_array_map_alloc(attr);
+ map = array_map_alloc(attr);
if (IS_ERR(map)) {
bpf_map_meta_free(inner_map_meta);
return map;
@@ -673,6 +688,7 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
}
const struct bpf_map_ops array_of_maps_map_ops = {
+ .map_alloc_check = fd_array_map_alloc_check,
.map_alloc = array_of_map_alloc,
.map_free = array_of_map_free,
.map_get_next_key = array_map_get_next_key,
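
The arraymap conversion above introduces a two-phase allocation scheme: a side-effect-free ->map_alloc_check() validates the attributes (and lets the fd-array variants layer their extra value_size check on top), while ->map_alloc() precharges memlock via bpf_map_precharge_memlock() and only then allocates. A minimal sketch of how a map type wires the two callbacks; all example_* names are illustrative only and not part of this patch:

/* Illustrative only: a map type using the new two-phase allocation. */
static int example_map_alloc_check(union bpf_attr *attr)
{
        if (attr->key_size != 4 || attr->value_size == 0 ||
            attr->max_entries == 0)
                return -EINVAL; /* rejected before any allocation happens */
        return 0;
}

static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
        struct bpf_map *map = bpf_map_area_alloc(sizeof(*map),
                                                 bpf_map_attr_numa_node(attr));

        if (!map)
                return ERR_PTR(-ENOMEM);
        bpf_map_init_from_attr(map, attr); /* copy mandatory attributes */
        return map;
}

const struct bpf_map_ops example_map_ops = {
        .map_alloc_check = example_map_alloc_check,
        .map_alloc       = example_map_alloc,
};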
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index b789ab78d28f..43171a0bb02b 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -495,6 +495,42 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
/**
+ * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and a
+ * sockaddr struct provided by user space
+ * @sk: sock struct that will use sockaddr
+ * @uaddr: sockaddr struct provided by user
+ * @type: The type of program to be executed
+ *
+ * socket is expected to be of type INET or INET6.
+ *
+ * This function will return %-EPERM if an attached program is found and
+ * returned value != 1 during execution. In all other cases, 0 is returned.
+ */
+int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
+ struct sockaddr *uaddr,
+ enum bpf_attach_type type)
+{
+ struct bpf_sock_addr_kern ctx = {
+ .sk = sk,
+ .uaddr = uaddr,
+ };
+ struct cgroup *cgrp;
+ int ret;
+
+ /* Check socket family since not all sockets represent a network
+ * endpoint (e.g. AF_UNIX).
+ */
+ if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
+ return 0;
+
+ cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+ ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
+
+ return ret == 1 ? 0 : -EPERM;
+}
+EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
+
+/**
* __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
* @sk: socket to get cgroup from
* @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
@@ -545,7 +581,7 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);
static const struct bpf_func_proto *
-cgroup_dev_func_proto(enum bpf_func_id func_id)
+cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_map_lookup_elem:
@@ -566,8 +602,11 @@ cgroup_dev_func_proto(enum bpf_func_id func_id)
static bool cgroup_dev_is_valid_access(int off, int size,
enum bpf_access_type type,
+ const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
+ const int size_default = sizeof(__u32);
+
if (type == BPF_WRITE)
return false;
@@ -576,8 +615,17 @@ static bool cgroup_dev_is_valid_access(int off, int size,
/* The verifier guarantees that size > 0. */
if (off % size != 0)
return false;
- if (size != sizeof(__u32))
- return false;
+
+ switch (off) {
+ case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
+ bpf_ctx_record_field_size(info, size_default);
+ if (!bpf_ctx_narrow_access_ok(off, size, size_default))
+ return false;
+ break;
+ default:
+ if (size != size_default)
+ return false;
+ }
return true;
}
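
__cgroup_bpf_run_filter_sock_addr() gives cgroup programs a veto over a user-supplied sockaddr: it returns 0 unless an attached program returned something other than 1, in which case -EPERM is propagated. A hedged sketch of how a socket-layer call site might use it; the attach type BPF_CGROUP_INET4_BIND and the hook placement are assumptions, not part of this diff:

/* Illustrative call site only; attach type and placement are assumed. */
static int example_bind_hook(struct sock *sk, struct sockaddr *uaddr)
{
        /* 0 unless an attached program rejected the address, in which
         * case -EPERM propagates back to the bind(2) caller.
         */
        return __cgroup_bpf_run_filter_sock_addr(sk, uaddr,
                                                 BPF_CGROUP_INET4_BIND);
}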
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 7949e8b8f94e..d315b393abdd 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -94,6 +94,7 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
fp->pages = size / PAGE_SIZE;
fp->aux = aux;
fp->aux->prog = fp;
+ fp->jit_requested = ebpf_jit_enabled();
INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
@@ -217,30 +218,40 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
return 0;
}
-static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
-{
- return BPF_CLASS(insn->code) == BPF_JMP &&
- /* Call and Exit are both special jumps with no
- * target inside the BPF instruction image.
- */
- BPF_OP(insn->code) != BPF_CALL &&
- BPF_OP(insn->code) != BPF_EXIT;
-}
-
static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
{
struct bpf_insn *insn = prog->insnsi;
u32 i, insn_cnt = prog->len;
+ bool pseudo_call;
+ u8 code;
+ int off;
for (i = 0; i < insn_cnt; i++, insn++) {
- if (!bpf_is_jmp_and_has_target(insn))
+ code = insn->code;
+ if (BPF_CLASS(code) != BPF_JMP)
continue;
+ if (BPF_OP(code) == BPF_EXIT)
+ continue;
+ if (BPF_OP(code) == BPF_CALL) {
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ pseudo_call = true;
+ else
+ continue;
+ } else {
+ pseudo_call = false;
+ }
+ off = pseudo_call ? insn->imm : insn->off;
/* Adjust offset of jmps if we cross boundaries. */
- if (i < pos && i + insn->off + 1 > pos)
- insn->off += delta;
- else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
- insn->off -= delta;
+ if (i < pos && i + off + 1 > pos)
+ off += delta;
+ else if (i > pos + delta && i + off + 1 <= pos + delta)
+ off -= delta;
+
+ if (pseudo_call)
+ insn->imm = off;
+ else
+ insn->off = off;
}
}
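
The rewritten bpf_adj_branches() has to handle bpf-to-bpf pseudo calls, whose relative target lives in insn->imm, unlike ordinary jumps which keep theirs in insn->off. A small illustrative helper (not part of the patch) that resolves either kind of target to an absolute instruction index under that convention:

/* Not in the patch: resolve a jump or pseudo-call target to an index. */
static u32 example_jmp_target(const struct bpf_insn *insn, u32 idx)
{
        bool pseudo_call = BPF_OP(insn->code) == BPF_CALL &&
                           insn->src_reg == BPF_PSEUDO_CALL;

        /* Relative targets are measured from the next instruction. */
        return idx + (pseudo_call ? insn->imm : insn->off) + 1;
}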
@@ -289,6 +300,11 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
}
#ifdef CONFIG_BPF_JIT
+/* All BPF JIT sysctl knobs here. */
+int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
+int bpf_jit_harden __read_mostly;
+int bpf_jit_kallsyms __read_mostly;
+
static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
unsigned long *symbol_start,
@@ -370,8 +386,6 @@ static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;
-int bpf_jit_kallsyms __read_mostly;
-
static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
@@ -552,8 +566,6 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
bpf_prog_unlock_free(fp);
}
-int bpf_jit_harden __read_mostly;
-
static int bpf_jit_blind_insn(const struct bpf_insn *from,
const struct bpf_insn *aux,
struct bpf_insn *to_buff)
@@ -711,7 +723,7 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
struct bpf_insn *insn;
int i, rewritten;
- if (!bpf_jit_blinding_enabled())
+ if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
return prog;
clone = bpf_prog_clone_create(prog, GFP_USER);
@@ -753,13 +765,16 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
i += insn_delta;
}
+ clone->blinded = 1;
return clone;
}
#endif /* CONFIG_BPF_JIT */
/* Base function for offset calculation. Needs to go into .text section,
* therefore keeping it non-static as well; will also be used by JITs
- * anyway later on, so do not let the compiler omit it.
+ * anyway later on, so do not let the compiler omit it. This also needs
+ * to go into kallsyms for correlation from e.g. bpftool, so naming
+ * must not change.
*/
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
@@ -767,6 +782,137 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
}
EXPORT_SYMBOL_GPL(__bpf_call_base);
+/* All UAPI available opcodes. */
+#define BPF_INSN_MAP(INSN_2, INSN_3) \
+ /* 32 bit ALU operations. */ \
+ /* Register based. */ \
+ INSN_3(ALU, ADD, X), \
+ INSN_3(ALU, SUB, X), \
+ INSN_3(ALU, AND, X), \
+ INSN_3(ALU, OR, X), \
+ INSN_3(ALU, LSH, X), \
+ INSN_3(ALU, RSH, X), \
+ INSN_3(ALU, XOR, X), \
+ INSN_3(ALU, MUL, X), \
+ INSN_3(ALU, MOV, X), \
+ INSN_3(ALU, DIV, X), \
+ INSN_3(ALU, MOD, X), \
+ INSN_2(ALU, NEG), \
+ INSN_3(ALU, END, TO_BE), \
+ INSN_3(ALU, END, TO_LE), \
+ /* Immediate based. */ \
+ INSN_3(ALU, ADD, K), \
+ INSN_3(ALU, SUB, K), \
+ INSN_3(ALU, AND, K), \
+ INSN_3(ALU, OR, K), \
+ INSN_3(ALU, LSH, K), \
+ INSN_3(ALU, RSH, K), \
+ INSN_3(ALU, XOR, K), \
+ INSN_3(ALU, MUL, K), \
+ INSN_3(ALU, MOV, K), \
+ INSN_3(ALU, DIV, K), \
+ INSN_3(ALU, MOD, K), \
+ /* 64 bit ALU operations. */ \
+ /* Register based. */ \
+ INSN_3(ALU64, ADD, X), \
+ INSN_3(ALU64, SUB, X), \
+ INSN_3(ALU64, AND, X), \
+ INSN_3(ALU64, OR, X), \
+ INSN_3(ALU64, LSH, X), \
+ INSN_3(ALU64, RSH, X), \
+ INSN_3(ALU64, XOR, X), \
+ INSN_3(ALU64, MUL, X), \
+ INSN_3(ALU64, MOV, X), \
+ INSN_3(ALU64, ARSH, X), \
+ INSN_3(ALU64, DIV, X), \
+ INSN_3(ALU64, MOD, X), \
+ INSN_2(ALU64, NEG), \
+ /* Immediate based. */ \
+ INSN_3(ALU64, ADD, K), \
+ INSN_3(ALU64, SUB, K), \
+ INSN_3(ALU64, AND, K), \
+ INSN_3(ALU64, OR, K), \
+ INSN_3(ALU64, LSH, K), \
+ INSN_3(ALU64, RSH, K), \
+ INSN_3(ALU64, XOR, K), \
+ INSN_3(ALU64, MUL, K), \
+ INSN_3(ALU64, MOV, K), \
+ INSN_3(ALU64, ARSH, K), \
+ INSN_3(ALU64, DIV, K), \
+ INSN_3(ALU64, MOD, K), \
+ /* Call instruction. */ \
+ INSN_2(JMP, CALL), \
+ /* Exit instruction. */ \
+ INSN_2(JMP, EXIT), \
+ /* Jump instructions. */ \
+ /* Register based. */ \
+ INSN_3(JMP, JEQ, X), \
+ INSN_3(JMP, JNE, X), \
+ INSN_3(JMP, JGT, X), \
+ INSN_3(JMP, JLT, X), \
+ INSN_3(JMP, JGE, X), \
+ INSN_3(JMP, JLE, X), \
+ INSN_3(JMP, JSGT, X), \
+ INSN_3(JMP, JSLT, X), \
+ INSN_3(JMP, JSGE, X), \
+ INSN_3(JMP, JSLE, X), \
+ INSN_3(JMP, JSET, X), \
+ /* Immediate based. */ \
+ INSN_3(JMP, JEQ, K), \
+ INSN_3(JMP, JNE, K), \
+ INSN_3(JMP, JGT, K), \
+ INSN_3(JMP, JLT, K), \
+ INSN_3(JMP, JGE, K), \
+ INSN_3(JMP, JLE, K), \
+ INSN_3(JMP, JSGT, K), \
+ INSN_3(JMP, JSLT, K), \
+ INSN_3(JMP, JSGE, K), \
+ INSN_3(JMP, JSLE, K), \
+ INSN_3(JMP, JSET, K), \
+ INSN_2(JMP, JA), \
+ /* Store instructions. */ \
+ /* Register based. */ \
+ INSN_3(STX, MEM, B), \
+ INSN_3(STX, MEM, H), \
+ INSN_3(STX, MEM, W), \
+ INSN_3(STX, MEM, DW), \
+ INSN_3(STX, XADD, W), \
+ INSN_3(STX, XADD, DW), \
+ /* Immediate based. */ \
+ INSN_3(ST, MEM, B), \
+ INSN_3(ST, MEM, H), \
+ INSN_3(ST, MEM, W), \
+ INSN_3(ST, MEM, DW), \
+ /* Load instructions. */ \
+ /* Register based. */ \
+ INSN_3(LDX, MEM, B), \
+ INSN_3(LDX, MEM, H), \
+ INSN_3(LDX, MEM, W), \
+ INSN_3(LDX, MEM, DW), \
+ /* Immediate based. */ \
+ INSN_3(LD, IMM, DW), \
+ /* Misc (old cBPF carry-over). */ \
+ INSN_3(LD, ABS, B), \
+ INSN_3(LD, ABS, H), \
+ INSN_3(LD, ABS, W), \
+ INSN_3(LD, IND, B), \
+ INSN_3(LD, IND, H), \
+ INSN_3(LD, IND, W)
+
+bool bpf_opcode_in_insntable(u8 code)
+{
+#define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
+#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
+ static const bool public_insntable[256] = {
+ [0 ... 255] = false,
+ /* Now overwrite non-defaults ... */
+ BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
+ };
+#undef BPF_INSN_3_TBL
+#undef BPF_INSN_2_TBL
+ return public_insntable[code];
+}
+
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
* __bpf_prog_run - run eBPF program on a given context
@@ -775,118 +921,21 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
*
* Decode and execute eBPF instructions.
*/
-static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
- u64 *stack)
+static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
u64 tmp;
+#define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
+#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
static const void *jumptable[256] = {
[0 ... 255] = &&default_label,
/* Now overwrite non-defaults ... */
- /* 32 bit ALU operations */
- [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
- [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
- [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
- [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
- [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
- [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
- [BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
- [BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
- [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
- [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
- [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
- [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
- [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
- [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
- [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
- [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
- [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
- [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
- [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
- [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
- [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
- [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
- [BPF_ALU | BPF_NEG] = &&ALU_NEG,
- [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
- [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
- /* 64 bit ALU operations */
- [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
- [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
- [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
- [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
- [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
- [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
- [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
- [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
- [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
- [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
- [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
- [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
- [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
- [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
- [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
- [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
- [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
- [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
- [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
- [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
- [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
- [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
- [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
- [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
- [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
- /* Call instruction */
- [BPF_JMP | BPF_CALL] = &&JMP_CALL,
+ BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
+ /* Non-UAPI available opcodes. */
+ [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
- /* Jumps */
- [BPF_JMP | BPF_JA] = &&JMP_JA,
- [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
- [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
- [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
- [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
- [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
- [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
- [BPF_JMP | BPF_JLT | BPF_X] = &&JMP_JLT_X,
- [BPF_JMP | BPF_JLT | BPF_K] = &&JMP_JLT_K,
- [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
- [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
- [BPF_JMP | BPF_JLE | BPF_X] = &&JMP_JLE_X,
- [BPF_JMP | BPF_JLE | BPF_K] = &&JMP_JLE_K,
- [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
- [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
- [BPF_JMP | BPF_JSLT | BPF_X] = &&JMP_JSLT_X,
- [BPF_JMP | BPF_JSLT | BPF_K] = &&JMP_JSLT_K,
- [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
- [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
- [BPF_JMP | BPF_JSLE | BPF_X] = &&JMP_JSLE_X,
- [BPF_JMP | BPF_JSLE | BPF_K] = &&JMP_JSLE_K,
- [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
- [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
- /* Program return */
- [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
- /* Store instructions */
- [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
- [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
- [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
- [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
- [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
- [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
- [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
- [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
- [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
- [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
- /* Load instructions */
- [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
- [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
- [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
- [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
- [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
- [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
- [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
- [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
- [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
- [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
- [BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
};
+#undef BPF_INSN_3_LBL
+#undef BPF_INSN_2_LBL
u32 tail_call_cnt = 0;
void *ptr;
int off;
@@ -950,14 +999,10 @@ select_insn:
(*(s64 *) &DST) >>= IMM;
CONT;
ALU64_MOD_X:
- if (unlikely(SRC == 0))
- return 0;
div64_u64_rem(DST, SRC, &tmp);
DST = tmp;
CONT;
ALU_MOD_X:
- if (unlikely((u32)SRC == 0))
- return 0;
tmp = (u32) DST;
DST = do_div(tmp, (u32) SRC);
CONT;
@@ -970,13 +1015,9 @@ select_insn:
DST = do_div(tmp, (u32) IMM);
CONT;
ALU64_DIV_X:
- if (unlikely(SRC == 0))
- return 0;
DST = div64_u64(DST, SRC);
CONT;
ALU_DIV_X:
- if (unlikely((u32)SRC == 0))
- return 0;
tmp = (u32) DST;
do_div(tmp, (u32) SRC);
DST = (u32) tmp;
@@ -1026,6 +1067,13 @@ select_insn:
BPF_R4, BPF_R5);
CONT;
+ JMP_CALL_ARGS:
+ BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
+ BPF_R3, BPF_R4,
+ BPF_R5,
+ insn + insn->off + 1);
+ CONT;
+
JMP_TAIL_CALL: {
struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
struct bpf_array *array = container_of(map, struct bpf_array, map);
@@ -1280,8 +1328,14 @@ load_byte:
goto load_byte;
default_label:
- /* If we ever reach this, we have a bug somewhere. */
- WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
+ /* If we ever reach this, we have a bug somewhere. Die hard here
+ * instead of just returning 0; we could be somewhere in a subprog,
+ * so execution could continue otherwise which we do /not/ want.
+ *
+ * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
+ */
+ pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
+ BUG_ON(1);
return 0;
}
STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
@@ -1298,6 +1352,23 @@ static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn
return ___bpf_prog_run(regs, insn, stack); \
}
+#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
+#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
+static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
+ const struct bpf_insn *insn) \
+{ \
+ u64 stack[stack_size / sizeof(u64)]; \
+ u64 regs[MAX_BPF_REG]; \
+\
+ FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
+ BPF_R1 = r1; \
+ BPF_R2 = r2; \
+ BPF_R3 = r3; \
+ BPF_R4 = r4; \
+ BPF_R5 = r5; \
+ return ___bpf_prog_run(regs, insn, stack); \
+}
+
#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
@@ -1309,6 +1380,10 @@ EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
+EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
+EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
+EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
+
#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
static unsigned int (*interpreters[])(const void *ctx,
@@ -1317,11 +1392,33 @@ EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
+#undef PROG_NAME_LIST
+#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
+static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
+ const struct bpf_insn *insn) = {
+EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
+EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
+EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
+};
+#undef PROG_NAME_LIST
+
+void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
+{
+ stack_depth = max_t(u32, stack_depth, 1);
+ insn->off = (s16) insn->imm;
+ insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
+ __bpf_call_base_args;
+ insn->code = BPF_JMP | BPF_CALL_ARGS;
+}
#else
-static unsigned int __bpf_prog_ret0(const void *ctx,
- const struct bpf_insn *insn)
+static unsigned int __bpf_prog_ret0_warn(const void *ctx,
+ const struct bpf_insn *insn)
{
+ /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
+ * is not working properly, so warn about it!
+ */
+ WARN_ON_ONCE(1);
return 0;
}
#endif
@@ -1329,6 +1426,9 @@ static unsigned int __bpf_prog_ret0(const void *ctx,
bool bpf_prog_array_compatible(struct bpf_array *array,
const struct bpf_prog *fp)
{
+ if (fp->kprobe_override)
+ return false;
+
if (!array->owner_prog_type) {
/* There's no owner yet where we could check for
* compatibility.
@@ -1378,7 +1478,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
- fp->bpf_func = __bpf_prog_ret0;
+ fp->bpf_func = __bpf_prog_ret0_warn;
#endif
/* eBPF JITs can rewrite the program in case constant
@@ -1476,23 +1576,41 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
__u32 __user *prog_ids, u32 cnt)
{
struct bpf_prog **prog;
- u32 i = 0, id;
-
+ unsigned long err = 0;
+ u32 i = 0, *ids;
+ bool nospc;
+
+ /* users of this function are doing:
+ * cnt = bpf_prog_array_length();
+ * if (cnt > 0)
+ * bpf_prog_array_copy_to_user(..., cnt);
+ * so below kcalloc doesn't need extra cnt > 0 check, but
+ * bpf_prog_array_length() releases rcu lock and
+ * prog array could have been swapped with empty or larger array,
+ * so always copy 'cnt' prog_ids to the user.
+ * In a rare race the user will see zero prog_ids
+ */
+ ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
+ if (!ids)
+ return -ENOMEM;
rcu_read_lock();
prog = rcu_dereference(progs)->progs;
for (; *prog; prog++) {
- id = (*prog)->aux->id;
- if (copy_to_user(prog_ids + i, &id, sizeof(id))) {
- rcu_read_unlock();
- return -EFAULT;
- }
+ if (*prog == &dummy_bpf_prog.prog)
+ continue;
+ ids[i] = (*prog)->aux->id;
if (++i == cnt) {
prog++;
break;
}
}
+ nospc = !!(*prog);
rcu_read_unlock();
- if (*prog)
+ err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
+ kfree(ids);
+ if (err)
+ return -EFAULT;
+ if (nospc)
return -ENOSPC;
return 0;
}
@@ -1564,14 +1682,41 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
return 0;
}
+int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+ __u32 __user *prog_ids, u32 request_cnt,
+ __u32 __user *prog_cnt)
+{
+ u32 cnt = 0;
+
+ if (array)
+ cnt = bpf_prog_array_length(array);
+
+ if (copy_to_user(prog_cnt, &cnt, sizeof(cnt)))
+ return -EFAULT;
+
+ /* return early if user requested only program count or nothing to copy */
+ if (!request_cnt || !cnt)
+ return 0;
+
+ return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt);
+}
+
static void bpf_prog_free_deferred(struct work_struct *work)
{
struct bpf_prog_aux *aux;
+ int i;
aux = container_of(work, struct bpf_prog_aux, work);
if (bpf_prog_is_dev_bound(aux))
bpf_prog_offload_destroy(aux->prog);
- bpf_jit_free(aux->prog);
+ for (i = 0; i < aux->func_cnt; i++)
+ bpf_jit_free(aux->func[i]);
+ if (aux->func_cnt) {
+ kfree(aux->func);
+ bpf_prog_unlock_free(aux->prog);
+ } else {
+ bpf_jit_free(aux->prog);
+ }
}
/* Free internal BPF program */
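
bpf_opcode_in_insntable() exposes the UAPI opcode set that the interpreter's jump table is now generated from, so callers such as the verifier can reject unknown opcodes up front and the interpreter's default_label BUG_ON() stays unreachable for user-supplied programs. A minimal sketch of such a pre-check (the example_ name is illustrative):

/* Sketch only: reject non-UAPI opcodes before they reach the interpreter. */
static int example_check_opcodes(const struct bpf_insn *insns, u32 len)
{
        u32 i;

        for (i = 0; i < len; i++)
                if (!bpf_opcode_in_insntable(insns[i].code))
                        return -EINVAL; /* unknown or kernel-internal opcode */
        return 0;
}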
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index ce5b669003b2..a4bb0b34375a 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -94,13 +94,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
if (!cmap)
return ERR_PTR(-ENOMEM);
- /* mandatory map attributes */
- cmap->map.map_type = attr->map_type;
- cmap->map.key_size = attr->key_size;
- cmap->map.value_size = attr->value_size;
- cmap->map.max_entries = attr->max_entries;
- cmap->map.map_flags = attr->map_flags;
- cmap->map.numa_node = bpf_map_attr_numa_node(attr);
+ bpf_map_init_from_attr(&cmap->map, attr);
/* Pre-limit array size based on NR_CPUS, not final CPU check */
if (cmap->map.max_entries > NR_CPUS) {
@@ -143,7 +137,7 @@ free_cmap:
return ERR_PTR(err);
}
-void __cpu_map_queue_destructor(void *ptr)
+static void __cpu_map_queue_destructor(void *ptr)
{
/* The tear-down procedure should have made sure that queue is
* empty. See __cpu_map_entry_replace() and work-queue
@@ -222,8 +216,8 @@ static struct xdp_pkt *convert_to_xdp_pkt(struct xdp_buff *xdp)
return xdp_pkt;
}
-struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
- struct xdp_pkt *xdp_pkt)
+static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
+ struct xdp_pkt *xdp_pkt)
{
unsigned int frame_size;
void *pkt_data_start;
@@ -337,9 +331,10 @@ static int cpu_map_kthread_run(void *data)
return 0;
}
-struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, int map_id)
+static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
+ int map_id)
{
- gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN;
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
struct bpf_cpu_map_entry *rcpu;
int numa, err;
@@ -395,7 +390,7 @@ free_rcu:
return NULL;
}
-void __cpu_map_entry_free(struct rcu_head *rcu)
+static void __cpu_map_entry_free(struct rcu_head *rcu)
{
struct bpf_cpu_map_entry *rcpu;
int cpu;
@@ -438,8 +433,8 @@ void __cpu_map_entry_free(struct rcu_head *rcu)
* cpu_map_kthread_stop, which waits for an RCU grace period before
* stopping kthread, emptying the queue.
*/
-void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
- u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
+static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
+ u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
struct bpf_cpu_map_entry *old_rcpu;
@@ -451,7 +446,7 @@ void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
}
}
-int cpu_map_delete_elem(struct bpf_map *map, void *key)
+static int cpu_map_delete_elem(struct bpf_map *map, void *key)
{
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
u32 key_cpu = *(u32 *)key;
@@ -464,8 +459,8 @@ int cpu_map_delete_elem(struct bpf_map *map, void *key)
return 0;
}
-int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
- u64 map_flags)
+static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
+ u64 map_flags)
{
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
struct bpf_cpu_map_entry *rcpu;
@@ -502,7 +497,7 @@ int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
return 0;
}
-void cpu_map_free(struct bpf_map *map)
+static void cpu_map_free(struct bpf_map *map)
{
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
int cpu;
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index ebdef54bf7df..565f9ece9115 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -93,13 +93,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
if (!dtab)
return ERR_PTR(-ENOMEM);
- /* mandatory map attributes */
- dtab->map.map_type = attr->map_type;
- dtab->map.key_size = attr->key_size;
- dtab->map.value_size = attr->value_size;
- dtab->map.max_entries = attr->max_entries;
- dtab->map.map_flags = attr->map_flags;
- dtab->map.numa_node = bpf_map_attr_numa_node(attr);
+ bpf_map_init_from_attr(&dtab->map, attr);
/* make sure page count doesn't overflow */
cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index e682850c9715..d6b76377cb6e 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -21,10 +21,39 @@ static const char * const func_id_str[] = {
};
#undef __BPF_FUNC_STR_FN
-const char *func_id_name(int id)
+static const char *__func_get_name(const struct bpf_insn_cbs *cbs,
+ const struct bpf_insn *insn,
+ char *buff, size_t len)
{
BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
+ if (insn->src_reg != BPF_PSEUDO_CALL &&
+ insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID &&
+ func_id_str[insn->imm])
+ return func_id_str[insn->imm];
+
+ if (cbs && cbs->cb_call)
+ return cbs->cb_call(cbs->private_data, insn);
+
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ snprintf(buff, len, "%+d", insn->imm);
+
+ return buff;
+}
+
+static const char *__func_imm_name(const struct bpf_insn_cbs *cbs,
+ const struct bpf_insn *insn,
+ u64 full_imm, char *buff, size_t len)
+{
+ if (cbs && cbs->cb_imm)
+ return cbs->cb_imm(cbs->private_data, insn, full_imm);
+
+ snprintf(buff, len, "0x%llx", (unsigned long long)full_imm);
+ return buff;
+}
+
+const char *func_id_name(int id)
+{
if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
return func_id_str[id];
else
@@ -83,40 +112,43 @@ static const char *const bpf_jmp_string[16] = {
[BPF_EXIT >> 4] = "exit",
};
-static void print_bpf_end_insn(bpf_insn_print_cb verbose,
- struct bpf_verifier_env *env,
+static void print_bpf_end_insn(bpf_insn_print_t verbose,
+ void *private_data,
const struct bpf_insn *insn)
{
- verbose(env, "(%02x) r%d = %s%d r%d\n", insn->code, insn->dst_reg,
+ verbose(private_data, "(%02x) r%d = %s%d r%d\n",
+ insn->code, insn->dst_reg,
BPF_SRC(insn->code) == BPF_TO_BE ? "be" : "le",
insn->imm, insn->dst_reg);
}
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
- const struct bpf_insn *insn, bool allow_ptr_leaks)
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+ const struct bpf_insn *insn,
+ bool allow_ptr_leaks)
{
+ const bpf_insn_print_t verbose = cbs->cb_print;
u8 class = BPF_CLASS(insn->code);
if (class == BPF_ALU || class == BPF_ALU64) {
if (BPF_OP(insn->code) == BPF_END) {
if (class == BPF_ALU64)
- verbose(env, "BUG_alu64_%02x\n", insn->code);
+ verbose(cbs->private_data, "BUG_alu64_%02x\n", insn->code);
else
- print_bpf_end_insn(verbose, env, insn);
+ print_bpf_end_insn(verbose, cbs->private_data, insn);
} else if (BPF_OP(insn->code) == BPF_NEG) {
- verbose(env, "(%02x) r%d = %s-r%d\n",
+ verbose(cbs->private_data, "(%02x) r%d = %s-r%d\n",
insn->code, insn->dst_reg,
class == BPF_ALU ? "(u32) " : "",
insn->dst_reg);
} else if (BPF_SRC(insn->code) == BPF_X) {
- verbose(env, "(%02x) %sr%d %s %sr%d\n",
+ verbose(cbs->private_data, "(%02x) %sr%d %s %sr%d\n",
insn->code, class == BPF_ALU ? "(u32) " : "",
insn->dst_reg,
bpf_alu_string[BPF_OP(insn->code) >> 4],
class == BPF_ALU ? "(u32) " : "",
insn->src_reg);
} else {
- verbose(env, "(%02x) %sr%d %s %s%d\n",
+ verbose(cbs->private_data, "(%02x) %sr%d %s %s%d\n",
insn->code, class == BPF_ALU ? "(u32) " : "",
insn->dst_reg,
bpf_alu_string[BPF_OP(insn->code) >> 4],
@@ -125,46 +157,46 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
}
} else if (class == BPF_STX) {
if (BPF_MODE(insn->code) == BPF_MEM)
- verbose(env, "(%02x) *(%s *)(r%d %+d) = r%d\n",
+ verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = r%d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg,
insn->off, insn->src_reg);
else if (BPF_MODE(insn->code) == BPF_XADD)
- verbose(env, "(%02x) lock *(%s *)(r%d %+d) += r%d\n",
+ verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) += r%d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg, insn->off,
insn->src_reg);
else
- verbose(env, "BUG_%02x\n", insn->code);
+ verbose(cbs->private_data, "BUG_%02x\n", insn->code);
} else if (class == BPF_ST) {
if (BPF_MODE(insn->code) != BPF_MEM) {
- verbose(env, "BUG_st_%02x\n", insn->code);
+ verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
return;
}
- verbose(env, "(%02x) *(%s *)(r%d %+d) = %d\n",
+ verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg,
insn->off, insn->imm);
} else if (class == BPF_LDX) {
if (BPF_MODE(insn->code) != BPF_MEM) {
- verbose(env, "BUG_ldx_%02x\n", insn->code);
+ verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
return;
}
- verbose(env, "(%02x) r%d = *(%s *)(r%d %+d)\n",
+ verbose(cbs->private_data, "(%02x) r%d = *(%s *)(r%d %+d)\n",
insn->code, insn->dst_reg,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->src_reg, insn->off);
} else if (class == BPF_LD) {
if (BPF_MODE(insn->code) == BPF_ABS) {
- verbose(env, "(%02x) r0 = *(%s *)skb[%d]\n",
+ verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[%d]\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->imm);
} else if (BPF_MODE(insn->code) == BPF_IND) {
- verbose(env, "(%02x) r0 = *(%s *)skb[r%d + %d]\n",
+ verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[r%d + %d]\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->src_reg, insn->imm);
@@ -175,40 +207,55 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
*/
u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
+ char tmp[64];
if (map_ptr && !allow_ptr_leaks)
imm = 0;
- verbose(env, "(%02x) r%d = 0x%llx\n", insn->code,
- insn->dst_reg, (unsigned long long)imm);
+ verbose(cbs->private_data, "(%02x) r%d = %s\n",
+ insn->code, insn->dst_reg,
+ __func_imm_name(cbs, insn, imm,
+ tmp, sizeof(tmp)));
} else {
- verbose(env, "BUG_ld_%02x\n", insn->code);
+ verbose(cbs->private_data, "BUG_ld_%02x\n", insn->code);
return;
}
} else if (class == BPF_JMP) {
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
- verbose(env, "(%02x) call %s#%d\n", insn->code,
- func_id_name(insn->imm), insn->imm);
+ char tmp[64];
+
+ if (insn->src_reg == BPF_PSEUDO_CALL) {
+ verbose(cbs->private_data, "(%02x) call pc%s\n",
+ insn->code,
+ __func_get_name(cbs, insn,
+ tmp, sizeof(tmp)));
+ } else {
+ strcpy(tmp, "unknown");
+ verbose(cbs->private_data, "(%02x) call %s#%d\n", insn->code,
+ __func_get_name(cbs, insn,
+ tmp, sizeof(tmp)),
+ insn->imm);
+ }
} else if (insn->code == (BPF_JMP | BPF_JA)) {
- verbose(env, "(%02x) goto pc%+d\n",
+ verbose(cbs->private_data, "(%02x) goto pc%+d\n",
insn->code, insn->off);
} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
- verbose(env, "(%02x) exit\n", insn->code);
+ verbose(cbs->private_data, "(%02x) exit\n", insn->code);
} else if (BPF_SRC(insn->code) == BPF_X) {
- verbose(env, "(%02x) if r%d %s r%d goto pc%+d\n",
+ verbose(cbs->private_data, "(%02x) if r%d %s r%d goto pc%+d\n",
insn->code, insn->dst_reg,
bpf_jmp_string[BPF_OP(insn->code) >> 4],
insn->src_reg, insn->off);
} else {
- verbose(env, "(%02x) if r%d %s 0x%x goto pc%+d\n",
+ verbose(cbs->private_data, "(%02x) if r%d %s 0x%x goto pc%+d\n",
insn->code, insn->dst_reg,
bpf_jmp_string[BPF_OP(insn->code) >> 4],
insn->imm, insn->off);
}
} else {
- verbose(env, "(%02x) %s\n",
+ verbose(cbs->private_data, "(%02x) %s\n",
insn->code, bpf_class_string[class]);
}
}
diff --git a/kernel/bpf/disasm.h b/kernel/bpf/disasm.h
index 8de977e420b6..e1324a834a24 100644
--- a/kernel/bpf/disasm.h
+++ b/kernel/bpf/disasm.h
@@ -17,16 +17,32 @@
#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
+#ifndef __KERNEL__
+#include <stdio.h>
+#include <string.h>
+#endif
extern const char *const bpf_alu_string[16];
extern const char *const bpf_class_string[8];
const char *func_id_name(int id);
-struct bpf_verifier_env;
-typedef void (*bpf_insn_print_cb)(struct bpf_verifier_env *env,
- const char *, ...);
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
- const struct bpf_insn *insn, bool allow_ptr_leaks);
+typedef __printf(2, 3) void (*bpf_insn_print_t)(void *private_data,
+ const char *, ...);
+typedef const char *(*bpf_insn_revmap_call_t)(void *private_data,
+ const struct bpf_insn *insn);
+typedef const char *(*bpf_insn_print_imm_t)(void *private_data,
+ const struct bpf_insn *insn,
+ __u64 full_imm);
+
+struct bpf_insn_cbs {
+ bpf_insn_print_t cb_print;
+ bpf_insn_revmap_call_t cb_call;
+ bpf_insn_print_imm_t cb_imm;
+ void *private_data;
+};
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+ const struct bpf_insn *insn,
+ bool allow_ptr_leaks);
#endif
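
The disassembler now takes a struct bpf_insn_cbs instead of a verifier environment, so any consumer can plug in its own print callback and pass opaque private_data through; cb_call/cb_imm may stay NULL to get the default helper-name and immediate formatting. A hedged in-kernel usage sketch assuming the usual kernel headers; the vprintk()-based callback and example_* names are illustrative only:

static __printf(2, 3)
void example_print_insn(void *private_data, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        vprintk(fmt, args);
        va_end(args);
}

static void example_dump_insn(const struct bpf_insn *insn)
{
        const struct bpf_insn_cbs cbs = {
                .cb_print     = example_print_insn,
                .private_data = NULL,
        };

        print_bpf_insn(&cbs, insn, false /* allow_ptr_leaks */);
}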
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3905d4bc5b80..b76828f23b49 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -227,7 +227,7 @@ static int alloc_extra_elems(struct bpf_htab *htab)
}
/* Called from syscall */
-static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+static int htab_map_alloc_check(union bpf_attr *attr)
{
bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
@@ -241,9 +241,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
int numa_node = bpf_map_attr_numa_node(attr);
- struct bpf_htab *htab;
- int err, i;
- u64 cost;
BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
offsetof(struct htab_elem, hash_node.pprev));
@@ -254,40 +251,68 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
/* LRU implementation is much more complicated than other
* maps. Hence, limit to CAP_SYS_ADMIN for now.
*/
- return ERR_PTR(-EPERM);
+ return -EPERM;
if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
/* reserved bits should not be used */
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (!lru && percpu_lru)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (lru && !prealloc)
- return ERR_PTR(-ENOTSUPP);
+ return -ENOTSUPP;
if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
+
+ /* check sanity of attributes.
+ * value_size == 0 may be allowed in the future to use map as a set
+ */
+ if (attr->max_entries == 0 || attr->key_size == 0 ||
+ attr->value_size == 0)
+ return -EINVAL;
+
+ if (attr->key_size > MAX_BPF_STACK)
+ /* eBPF programs initialize keys on stack, so they cannot be
+ * larger than max stack size
+ */
+ return -E2BIG;
+
+ if (attr->value_size >= KMALLOC_MAX_SIZE -
+ MAX_BPF_STACK - sizeof(struct htab_elem))
+ /* if value_size is bigger, the user space won't be able to
+ * access the elements via bpf syscall. This check also makes
+ * sure that the elem_size doesn't overflow and it's
+ * kmalloc-able later in htab_map_update_elem()
+ */
+ return -E2BIG;
+
+ return 0;
+}
+
+static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+{
+ bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
+ attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ /* percpu_lru means each cpu has its own LRU list.
+ * it is different from BPF_MAP_TYPE_PERCPU_HASH where
+ * the map's value itself is percpu. percpu_lru has
+ * nothing to do with the map's value.
+ */
+ bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
+ bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
+ struct bpf_htab *htab;
+ int err, i;
+ u64 cost;
htab = kzalloc(sizeof(*htab), GFP_USER);
if (!htab)
return ERR_PTR(-ENOMEM);
- /* mandatory map attributes */
- htab->map.map_type = attr->map_type;
- htab->map.key_size = attr->key_size;
- htab->map.value_size = attr->value_size;
- htab->map.max_entries = attr->max_entries;
- htab->map.map_flags = attr->map_flags;
- htab->map.numa_node = numa_node;
-
- /* check sanity of attributes.
- * value_size == 0 may be allowed in the future to use map as a set
- */
- err = -EINVAL;
- if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
- htab->map.value_size == 0)
- goto free_htab;
+ bpf_map_init_from_attr(&htab->map, attr);
if (percpu_lru) {
/* ensure each CPU's lru list has >=1 elements.
@@ -304,22 +329,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
/* hash table size must be power of 2 */
htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
- err = -E2BIG;
- if (htab->map.key_size > MAX_BPF_STACK)
- /* eBPF programs initialize keys on stack, so they cannot be
- * larger than max stack size
- */
- goto free_htab;
-
- if (htab->map.value_size >= KMALLOC_MAX_SIZE -
- MAX_BPF_STACK - sizeof(struct htab_elem))
- /* if value_size is bigger, the user space won't be able to
- * access the elements via bpf syscall. This check also makes
- * sure that the elem_size doesn't overflow and it's
- * kmalloc-able later in htab_map_update_elem()
- */
- goto free_htab;
-
htab->elem_size = sizeof(struct htab_elem) +
round_up(htab->map.key_size, 8);
if (percpu)
@@ -327,6 +336,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
else
htab->elem_size += round_up(htab->map.value_size, 8);
+ err = -E2BIG;
/* prevent zero size kmalloc and check for u32 overflow */
if (htab->n_buckets == 0 ||
htab->n_buckets > U32_MAX / sizeof(struct bucket))
@@ -1143,6 +1153,7 @@ static void htab_map_free(struct bpf_map *map)
}
const struct bpf_map_ops htab_map_ops = {
+ .map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
@@ -1153,6 +1164,7 @@ const struct bpf_map_ops htab_map_ops = {
};
const struct bpf_map_ops htab_lru_map_ops = {
+ .map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
@@ -1236,6 +1248,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
}
const struct bpf_map_ops htab_percpu_map_ops = {
+ .map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
@@ -1245,6 +1258,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
};
const struct bpf_map_ops htab_lru_percpu_map_ops = {
+ .map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
@@ -1253,11 +1267,11 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
.map_delete_elem = htab_lru_map_delete_elem,
};
-static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
+static int fd_htab_map_alloc_check(union bpf_attr *attr)
{
if (attr->value_size != sizeof(u32))
- return ERR_PTR(-EINVAL);
- return htab_map_alloc(attr);
+ return -EINVAL;
+ return htab_map_alloc_check(attr);
}
static void fd_htab_map_free(struct bpf_map *map)
@@ -1328,7 +1342,7 @@ static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
if (IS_ERR(inner_map_meta))
return inner_map_meta;
- map = fd_htab_map_alloc(attr);
+ map = htab_map_alloc(attr);
if (IS_ERR(map)) {
bpf_map_meta_free(inner_map_meta);
return map;
@@ -1372,6 +1386,7 @@ static void htab_of_map_free(struct bpf_map *map)
}
const struct bpf_map_ops htab_of_maps_map_ops = {
+ .map_alloc_check = fd_htab_map_alloc_check,
.map_alloc = htab_of_map_alloc,
.map_free = htab_of_map_free,
.map_get_next_key = htab_map_get_next_key,
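
As with arraymap, the hash table attribute checks move into htab_map_alloc_check(), so oversized keys or values are rejected with -E2BIG/-EINVAL before any memory is committed. For reference, a userspace sketch of a raw bpf(2) BPF_MAP_CREATE call that stays within those limits (key_size bounded by the 512-byte BPF stack, value_size well below the KMALLOC_MAX_SIZE bound); the example_ name is illustrative:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_create_htab(void)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_HASH;
        attr.key_size    = 8;    /* must stay <= MAX_BPF_STACK (512) */
        attr.value_size  = 64;   /* far below the KMALLOC_MAX_SIZE bound */
        attr.max_entries = 1024;

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}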
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 5bb5e49ef4c3..bf6da59ae0d0 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -150,44 +150,37 @@ static int bpf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
return 0;
}
-static int bpf_mkobj_ops(struct inode *dir, struct dentry *dentry,
- umode_t mode, const struct inode_operations *iops)
+static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
+ const struct inode_operations *iops)
{
- struct inode *inode;
-
- inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFREG);
+ struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_op = iops;
- inode->i_private = dentry->d_fsdata;
+ inode->i_private = raw;
bpf_dentry_finalize(dentry, inode, dir);
return 0;
}
-static int bpf_mkobj(struct inode *dir, struct dentry *dentry, umode_t mode,
- dev_t devt)
+static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
{
- enum bpf_type type = MINOR(devt);
-
- if (MAJOR(devt) != UNNAMED_MAJOR || !S_ISREG(mode) ||
- dentry->d_fsdata == NULL)
- return -EPERM;
+ return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops);
+}
- switch (type) {
- case BPF_TYPE_PROG:
- return bpf_mkobj_ops(dir, dentry, mode, &bpf_prog_iops);
- case BPF_TYPE_MAP:
- return bpf_mkobj_ops(dir, dentry, mode, &bpf_map_iops);
- default:
- return -EPERM;
- }
+static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
+{
+ return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops);
}
static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
+ /* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
+ * extensions.
+ */
if (strchr(dentry->d_name.name, '.'))
return ERR_PTR(-EPERM);
@@ -218,7 +211,6 @@ static int bpf_symlink(struct inode *dir, struct dentry *dentry,
static const struct inode_operations bpf_dir_iops = {
.lookup = bpf_lookup,
- .mknod = bpf_mkobj,
.mkdir = bpf_mkdir,
.symlink = bpf_symlink,
.rmdir = simple_rmdir,
@@ -234,7 +226,6 @@ static int bpf_obj_do_pin(const struct filename *pathname, void *raw,
struct inode *dir;
struct path path;
umode_t mode;
- dev_t devt;
int ret;
dentry = kern_path_create(AT_FDCWD, pathname->name, &path, 0);
@@ -242,9 +233,8 @@ static int bpf_obj_do_pin(const struct filename *pathname, void *raw,
return PTR_ERR(dentry);
mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
- devt = MKDEV(UNNAMED_MAJOR, type);
- ret = security_path_mknod(&path, dentry, mode, devt);
+ ret = security_path_mknod(&path, dentry, mode, 0);
if (ret)
goto out;
@@ -254,9 +244,16 @@ static int bpf_obj_do_pin(const struct filename *pathname, void *raw,
goto out;
}
- dentry->d_fsdata = raw;
- ret = vfs_mknod(dir, dentry, mode, devt);
- dentry->d_fsdata = NULL;
+ switch (type) {
+ case BPF_TYPE_PROG:
+ ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
+ break;
+ case BPF_TYPE_MAP:
+ ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
+ break;
+ default:
+ ret = -EPERM;
+ }
out:
done_path_create(&path, dentry);
return ret;
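
The pinning path now creates inodes through vfs_mkobj() with per-type init callbacks (bpf_mkprog()/bpf_mkmap()) instead of smuggling the object pointer through d_fsdata and vfs_mknod(). Userspace is unaffected; for context, a sketch of the BPF_OBJ_PIN call this path serves, with the fd and path below as placeholders:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_pin_fd(int bpf_fd, const char *path)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.pathname = (__u64)(unsigned long)path;
        attr.bpf_fd   = bpf_fd;

        return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
}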
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 885e45479680..b4b5b81e7251 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -522,12 +522,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
return ERR_PTR(-ENOMEM);
/* copy mandatory map attributes */
- trie->map.map_type = attr->map_type;
- trie->map.key_size = attr->key_size;
- trie->map.value_size = attr->value_size;
- trie->map.max_entries = attr->max_entries;
- trie->map.map_flags = attr->map_flags;
- trie->map.numa_node = bpf_map_attr_numa_node(attr);
+ bpf_map_init_from_attr(&trie->map, attr);
trie->data_size = attr->key_size -
offsetof(struct bpf_lpm_trie_key, data);
trie->max_prefixlen = trie->data_size * 8;
@@ -560,7 +555,10 @@ static void trie_free(struct bpf_map *map)
struct lpm_trie_node __rcu **slot;
struct lpm_trie_node *node;
- raw_spin_lock(&trie->lock);
+ /* Wait for outstanding programs to complete
+ * update/lookup/delete/get_next_key and free the trie.
+ */
+ synchronize_rcu();
/* Always start at the root and walk down to a node that has no
* children. Then free that node, nullify its reference in the parent
@@ -571,10 +569,9 @@ static void trie_free(struct bpf_map *map)
slot = &trie->root;
for (;;) {
- node = rcu_dereference_protected(*slot,
- lockdep_is_held(&trie->lock));
+ node = rcu_dereference_protected(*slot, 1);
if (!node)
- goto unlock;
+ goto out;
if (rcu_access_pointer(node->child[0])) {
slot = &node->child[0];
@@ -592,13 +589,100 @@ static void trie_free(struct bpf_map *map)
}
}
-unlock:
- raw_spin_unlock(&trie->lock);
+out:
+ kfree(trie);
}
-static int trie_get_next_key(struct bpf_map *map, void *key, void *next_key)
+static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
{
- return -ENOTSUPP;
+ struct lpm_trie_node *node, *next_node = NULL, *parent, *search_root;
+ struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+ struct bpf_lpm_trie_key *key = _key, *next_key = _next_key;
+ struct lpm_trie_node **node_stack = NULL;
+ int err = 0, stack_ptr = -1;
+ unsigned int next_bit;
+ size_t matchlen;
+
+ /* The get_next_key follows postorder. For the 4-node example at
+ * the top of this file, trie_get_next_key() returns the following
+ * keys one after another:
+ * 192.168.0.0/24
+ * 192.168.1.0/24
+ * 192.168.128.0/24
+ * 192.168.0.0/16
+ *
+ * The idea is to return more specific keys before less specific ones.
+ */
+
+ /* Empty trie */
+ search_root = rcu_dereference(trie->root);
+ if (!search_root)
+ return -ENOENT;
+
+ /* For invalid key, find the leftmost node in the trie */
+ if (!key || key->prefixlen > trie->max_prefixlen)
+ goto find_leftmost;
+
+ node_stack = kmalloc(trie->max_prefixlen * sizeof(struct lpm_trie_node *),
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!node_stack)
+ return -ENOMEM;
+
+ /* Try to find the exact node for the given key */
+ for (node = search_root; node;) {
+ node_stack[++stack_ptr] = node;
+ matchlen = longest_prefix_match(trie, node, key);
+ if (node->prefixlen != matchlen ||
+ node->prefixlen == key->prefixlen)
+ break;
+
+ next_bit = extract_bit(key->data, node->prefixlen);
+ node = rcu_dereference(node->child[next_bit]);
+ }
+ if (!node || node->prefixlen != key->prefixlen ||
+ (node->flags & LPM_TREE_NODE_FLAG_IM))
+ goto find_leftmost;
+
+ /* The node with the exactly-matching key has been found,
+ * find the first node in postorder after the matched node.
+ */
+ node = node_stack[stack_ptr];
+ while (stack_ptr > 0) {
+ parent = node_stack[stack_ptr - 1];
+ if (rcu_dereference(parent->child[0]) == node) {
+ search_root = rcu_dereference(parent->child[1]);
+ if (search_root)
+ goto find_leftmost;
+ }
+ if (!(parent->flags & LPM_TREE_NODE_FLAG_IM)) {
+ next_node = parent;
+ goto do_copy;
+ }
+
+ node = parent;
+ stack_ptr--;
+ }
+
+ /* did not find anything */
+ err = -ENOENT;
+ goto free_stack;
+
+find_leftmost:
+ /* Find the leftmost non-intermediate node; all intermediate nodes
+ * have exactly two children, so this loop always finds one.
+ */
+ for (node = search_root; node;) {
+ if (!(node->flags & LPM_TREE_NODE_FLAG_IM))
+ next_node = node;
+ node = rcu_dereference(node->child[0]);
+ }
+do_copy:
+ next_key->prefixlen = next_node->prefixlen;
+ memcpy((void *)next_key + offsetof(struct bpf_lpm_trie_key, data),
+ next_node->data, trie->data_size);
+free_stack:
+ kfree(node_stack);
+ return err;
}
const struct bpf_map_ops trie_map_ops = {
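
With trie_get_next_key() implemented, LPM tries can finally be walked from userspace via BPF_MAP_GET_NEXT_KEY: passing no current key yields the leftmost entry, the postorder guarantees more specific prefixes come out before less specific ones, and the walk ends with -ENOENT. A hedged userspace sketch assuming libbpf's bpf_map_get_next_key() and an illustrative IPv4 key layout (example_* names are not part of the patch):

#include <bpf/bpf.h>            /* libbpf: bpf_map_get_next_key() */
#include <linux/types.h>

struct example_ipv4_lpm_key {
        __u32 prefixlen;
        __u8  data[4];
};

static void example_dump_trie(int map_fd)
{
        struct example_ipv4_lpm_key key, next;

        if (bpf_map_get_next_key(map_fd, NULL, &next))
                return;                         /* empty trie */
        do {
                key = next;                     /* process 'key' here */
        } while (!bpf_map_get_next_key(map_fd, &key, &next));
}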
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 8455b89d1bbf..c9401075b58c 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -16,18 +16,35 @@
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
+#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
+#include <linux/proc_ns.h>
#include <linux/rtnetlink.h>
+#include <linux/rwsem.h>
-/* protected by RTNL */
+/* Protects bpf_prog_offload_devs, bpf_map_offload_devs and offload members
+ * of all progs.
+ * RTNL lock cannot be taken when holding this lock.
+ */
+static DECLARE_RWSEM(bpf_devs_lock);
static LIST_HEAD(bpf_prog_offload_devs);
+static LIST_HEAD(bpf_map_offload_devs);
+
+static int bpf_dev_offload_check(struct net_device *netdev)
+{
+ if (!netdev)
+ return -EINVAL;
+ if (!netdev->netdev_ops->ndo_bpf)
+ return -EOPNOTSUPP;
+ return 0;
+}
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
- struct net *net = current->nsproxy->net_ns;
- struct bpf_dev_offload *offload;
+ struct bpf_prog_offload *offload;
+ int err;
if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
attr->prog_type != BPF_PROG_TYPE_XDP)
@@ -41,34 +58,44 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
return -ENOMEM;
offload->prog = prog;
- init_waitqueue_head(&offload->verifier_done);
- rtnl_lock();
- offload->netdev = __dev_get_by_index(net, attr->prog_ifindex);
- if (!offload->netdev) {
- rtnl_unlock();
- kfree(offload);
- return -EINVAL;
- }
+ offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
+ attr->prog_ifindex);
+ err = bpf_dev_offload_check(offload->netdev);
+ if (err)
+ goto err_maybe_put;
+ down_write(&bpf_devs_lock);
+ if (offload->netdev->reg_state != NETREG_REGISTERED) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
prog->aux->offload = offload;
list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
- rtnl_unlock();
+ dev_put(offload->netdev);
+ up_write(&bpf_devs_lock);
return 0;
+err_unlock:
+ up_write(&bpf_devs_lock);
+err_maybe_put:
+ if (offload->netdev)
+ dev_put(offload->netdev);
+ kfree(offload);
+ return err;
}
static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
struct netdev_bpf *data)
{
- struct net_device *netdev = prog->aux->offload->netdev;
+ struct bpf_prog_offload *offload = prog->aux->offload;
+ struct net_device *netdev;
ASSERT_RTNL();
- if (!netdev)
+ if (!offload)
return -ENODEV;
- if (!netdev->netdev_ops->ndo_bpf)
- return -EOPNOTSUPP;
+ netdev = offload->netdev;
data->command = cmd;
@@ -87,62 +114,63 @@ int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
if (err)
goto exit_unlock;
- env->dev_ops = data.verifier.ops;
-
+ env->prog->aux->offload->dev_ops = data.verifier.ops;
env->prog->aux->offload->dev_state = true;
- env->prog->aux->offload->verifier_running = true;
exit_unlock:
rtnl_unlock();
return err;
}
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx)
+{
+ struct bpf_prog_offload *offload;
+ int ret = -ENODEV;
+
+ down_read(&bpf_devs_lock);
+ offload = env->prog->aux->offload;
+ if (offload)
+ ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
+ up_read(&bpf_devs_lock);
+
+ return ret;
+}
+
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
- struct bpf_dev_offload *offload = prog->aux->offload;
+ struct bpf_prog_offload *offload = prog->aux->offload;
struct netdev_bpf data = {};
- /* Caution - if netdev is destroyed before the program, this function
- * will be called twice.
- */
-
data.offload.prog = prog;
- if (offload->verifier_running)
- wait_event(offload->verifier_done, !offload->verifier_running);
-
if (offload->dev_state)
WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));
- offload->dev_state = false;
+ /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
+ bpf_prog_free_id(prog, true);
+
list_del_init(&offload->offloads);
- offload->netdev = NULL;
+ kfree(offload);
+ prog->aux->offload = NULL;
}
void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
- struct bpf_dev_offload *offload = prog->aux->offload;
-
- offload->verifier_running = false;
- wake_up(&offload->verifier_done);
-
rtnl_lock();
- __bpf_prog_offload_destroy(prog);
+ down_write(&bpf_devs_lock);
+ if (prog->aux->offload)
+ __bpf_prog_offload_destroy(prog);
+ up_write(&bpf_devs_lock);
rtnl_unlock();
-
- kfree(offload);
}
static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
- struct bpf_dev_offload *offload = prog->aux->offload;
struct netdev_bpf data = {};
int ret;
data.offload.prog = prog;
- offload->verifier_running = false;
- wake_up(&offload->verifier_done);
-
rtnl_lock();
ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
rtnl_unlock();
@@ -164,14 +192,323 @@ int bpf_prog_offload_compile(struct bpf_prog *prog)
return bpf_prog_offload_translate(prog);
}
+struct ns_get_path_bpf_prog_args {
+ struct bpf_prog *prog;
+ struct bpf_prog_info *info;
+};
+
+static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
+{
+ struct ns_get_path_bpf_prog_args *args = private_data;
+ struct bpf_prog_aux *aux = args->prog->aux;
+ struct ns_common *ns;
+ struct net *net;
+
+ rtnl_lock();
+ down_read(&bpf_devs_lock);
+
+ if (aux->offload) {
+ args->info->ifindex = aux->offload->netdev->ifindex;
+ net = dev_net(aux->offload->netdev);
+ get_net(net);
+ ns = &net->ns;
+ } else {
+ args->info->ifindex = 0;
+ ns = NULL;
+ }
+
+ up_read(&bpf_devs_lock);
+ rtnl_unlock();
+
+ return ns;
+}
+
+int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+ struct bpf_prog *prog)
+{
+ struct ns_get_path_bpf_prog_args args = {
+ .prog = prog,
+ .info = info,
+ };
+ struct bpf_prog_aux *aux = prog->aux;
+ struct inode *ns_inode;
+ struct path ns_path;
+ char __user *uinsns;
+ void *res;
+ u32 ulen;
+
+ res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
+ if (IS_ERR(res)) {
+ if (!info->ifindex)
+ return -ENODEV;
+ return PTR_ERR(res);
+ }
+
+ down_read(&bpf_devs_lock);
+
+ if (!aux->offload) {
+ up_read(&bpf_devs_lock);
+ return -ENODEV;
+ }
+
+ ulen = info->jited_prog_len;
+ info->jited_prog_len = aux->offload->jited_len;
+ if (info->jited_prog_len && ulen) {
+ uinsns = u64_to_user_ptr(info->jited_prog_insns);
+ ulen = min_t(u32, info->jited_prog_len, ulen);
+ if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
+ up_read(&bpf_devs_lock);
+ return -EFAULT;
+ }
+ }
+
+ up_read(&bpf_devs_lock);
+
+ ns_inode = ns_path.dentry->d_inode;
+ info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
+ info->netns_ino = ns_inode->i_ino;
+ path_put(&ns_path);
+
+ return 0;
+}
+
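A minimal userspace sketch of consuming the fields filled in above, assuming libbpf's bpf_obj_get_info_by_fd(); comparing netns_dev/netns_ino against a stat() of /proc/self/ns/net tells whether the offload device lives in the caller's network namespace (prog_fd is an illustrative parameter):

#include <stdio.h>
#include <sys/stat.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int show_offload_info(int prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);
	struct stat ns_stat;

	if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
		return -1;
	if (!info.ifindex)
		return 0;	/* not device-bound */

	printf("offloaded: ifindex %u netns_dev 0x%llx netns_ino %llu\n",
	       info.ifindex, (unsigned long long)info.netns_dev,
	       (unsigned long long)info.netns_ino);

	if (!stat("/proc/self/ns/net", &ns_stat) &&
	    ns_stat.st_dev == info.netns_dev &&
	    ns_stat.st_ino == info.netns_ino)
		printf("device is in the current netns\n");
	return 0;
}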
const struct bpf_prog_ops bpf_offload_prog_ops = {
};
+static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
+ enum bpf_netdev_command cmd)
+{
+ struct netdev_bpf data = {};
+ struct net_device *netdev;
+
+ ASSERT_RTNL();
+
+ data.command = cmd;
+ data.offmap = offmap;
+ /* Caller must make sure netdev is valid */
+ netdev = offmap->netdev;
+
+ return netdev->netdev_ops->ndo_bpf(netdev, &data);
+}
+
+struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
+{
+ struct net *net = current->nsproxy->net_ns;
+ struct bpf_offloaded_map *offmap;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+ if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
+ attr->map_type != BPF_MAP_TYPE_HASH)
+ return ERR_PTR(-EINVAL);
+
+ offmap = kzalloc(sizeof(*offmap), GFP_USER);
+ if (!offmap)
+ return ERR_PTR(-ENOMEM);
+
+ bpf_map_init_from_attr(&offmap->map, attr);
+
+ rtnl_lock();
+ down_write(&bpf_devs_lock);
+ offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
+ err = bpf_dev_offload_check(offmap->netdev);
+ if (err)
+ goto err_unlock;
+
+ err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
+ if (err)
+ goto err_unlock;
+
+ list_add_tail(&offmap->offloads, &bpf_map_offload_devs);
+ up_write(&bpf_devs_lock);
+ rtnl_unlock();
+
+ return &offmap->map;
+
+err_unlock:
+ up_write(&bpf_devs_lock);
+ rtnl_unlock();
+ kfree(offmap);
+ return ERR_PTR(err);
+}
+
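A minimal userspace sketch of requesting an offloaded map: setting map_ifindex in the BPF_MAP_CREATE attributes is what makes find_and_alloc_map() (see the syscall.c hunk below) route the allocation to bpf_map_offload_map_alloc() above. The raw bpf(2) syscall is shown to keep the attribute explicit; ifindex is an illustrative parameter:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int create_offloaded_array(unsigned int ifindex)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;	/* only ARRAY and HASH are accepted */
	attr.key_size = 4;
	attr.value_size = 4;
	attr.max_entries = 16;
	attr.map_ifindex = ifindex;		/* bind the map to this netdev */

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}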
+static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
+{
+ WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
+ /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
+ bpf_map_free_id(&offmap->map, true);
+ list_del_init(&offmap->offloads);
+ offmap->netdev = NULL;
+}
+
+void bpf_map_offload_map_free(struct bpf_map *map)
+{
+ struct bpf_offloaded_map *offmap = map_to_offmap(map);
+
+ rtnl_lock();
+ down_write(&bpf_devs_lock);
+ if (offmap->netdev)
+ __bpf_map_offload_destroy(offmap);
+ up_write(&bpf_devs_lock);
+ rtnl_unlock();
+
+ kfree(offmap);
+}
+
+int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
+{
+ struct bpf_offloaded_map *offmap = map_to_offmap(map);
+ int ret = -ENODEV;
+
+ down_read(&bpf_devs_lock);
+ if (offmap->netdev)
+ ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
+ up_read(&bpf_devs_lock);
+
+ return ret;
+}
+
+int bpf_map_offload_update_elem(struct bpf_map *map,
+ void *key, void *value, u64 flags)
+{
+ struct bpf_offloaded_map *offmap = map_to_offmap(map);
+ int ret = -ENODEV;
+
+ if (unlikely(flags > BPF_EXIST))
+ return -EINVAL;
+
+ down_read(&bpf_devs_lock);
+ if (offmap->netdev)
+ ret = offmap->dev_ops->map_update_elem(offmap, key, value,
+ flags);
+ up_read(&bpf_devs_lock);
+
+ return ret;
+}
+
+int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_offloaded_map *offmap = map_to_offmap(map);
+ int ret = -ENODEV;
+
+ down_read(&bpf_devs_lock);
+ if (offmap->netdev)
+ ret = offmap->dev_ops->map_delete_elem(offmap, key);
+ up_read(&bpf_devs_lock);
+
+ return ret;
+}
+
+int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
+{
+ struct bpf_offloaded_map *offmap = map_to_offmap(map);
+ int ret = -ENODEV;
+
+ down_read(&bpf_devs_lock);
+ if (offmap->netdev)
+ ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
+ up_read(&bpf_devs_lock);
+
+ return ret;
+}
+
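The four helpers above only forward to the driver under bpf_devs_lock. A hedged, kernel-side sketch of the callback table they expect; the demo_* names are illustrative and the struct bpf_map_dev_ops layout is assumed to match the header change in this series:

static int demo_map_get_next_key(struct bpf_offloaded_map *offmap,
				 void *key, void *next_key)
{
	return -ENOENT;		/* ask the device for the next populated key */
}

static int demo_map_lookup_elem(struct bpf_offloaded_map *offmap,
				void *key, void *value)
{
	return -ENOENT;		/* copy the element out of device memory */
}

static int demo_map_update_elem(struct bpf_offloaded_map *offmap,
				void *key, void *value, u64 flags)
{
	return 0;		/* write the element into device memory */
}

static int demo_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
	return 0;
}

static const struct bpf_map_dev_ops demo_map_dev_ops = {
	.map_get_next_key	= demo_map_get_next_key,
	.map_lookup_elem	= demo_map_lookup_elem,
	.map_update_elem	= demo_map_update_elem,
	.map_delete_elem	= demo_map_delete_elem,
};

A driver would presumably point offmap->dev_ops at such a table while handling the BPF_OFFLOAD_MAP_ALLOC command issued by bpf_map_offload_map_alloc() above.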
+struct ns_get_path_bpf_map_args {
+ struct bpf_offloaded_map *offmap;
+ struct bpf_map_info *info;
+};
+
+static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
+{
+ struct ns_get_path_bpf_map_args *args = private_data;
+ struct ns_common *ns;
+ struct net *net;
+
+ rtnl_lock();
+ down_read(&bpf_devs_lock);
+
+ if (args->offmap->netdev) {
+ args->info->ifindex = args->offmap->netdev->ifindex;
+ net = dev_net(args->offmap->netdev);
+ get_net(net);
+ ns = &net->ns;
+ } else {
+ args->info->ifindex = 0;
+ ns = NULL;
+ }
+
+ up_read(&bpf_devs_lock);
+ rtnl_unlock();
+
+ return ns;
+}
+
+int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
+{
+ struct ns_get_path_bpf_map_args args = {
+ .offmap = map_to_offmap(map),
+ .info = info,
+ };
+ struct inode *ns_inode;
+ struct path ns_path;
+ void *res;
+
+ res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
+ if (IS_ERR(res)) {
+ if (!info->ifindex)
+ return -ENODEV;
+ return PTR_ERR(res);
+ }
+
+ ns_inode = ns_path.dentry->d_inode;
+ info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
+ info->netns_ino = ns_inode->i_ino;
+ path_put(&ns_path);
+
+ return 0;
+}
+
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map)
+{
+ struct bpf_offloaded_map *offmap;
+ struct bpf_prog_offload *offload;
+ bool ret;
+
+ if (!bpf_prog_is_dev_bound(prog->aux) || !bpf_map_is_dev_bound(map))
+ return false;
+
+ down_read(&bpf_devs_lock);
+ offload = prog->aux->offload;
+ offmap = map_to_offmap(map);
+
+ ret = offload && offload->netdev == offmap->netdev;
+ up_read(&bpf_devs_lock);
+
+ return ret;
+}
+
+static void bpf_offload_orphan_all_progs(struct net_device *netdev)
+{
+ struct bpf_prog_offload *offload, *tmp;
+
+ list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, offloads)
+ if (offload->netdev == netdev)
+ __bpf_prog_offload_destroy(offload->prog);
+}
+
+static void bpf_offload_orphan_all_maps(struct net_device *netdev)
+{
+ struct bpf_offloaded_map *offmap, *tmp;
+
+ list_for_each_entry_safe(offmap, tmp, &bpf_map_offload_devs, offloads)
+ if (offmap->netdev == netdev)
+ __bpf_map_offload_destroy(offmap);
+}
+
static int bpf_offload_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- struct bpf_dev_offload *offload, *tmp;
ASSERT_RTNL();
@@ -181,11 +518,10 @@ static int bpf_offload_notification(struct notifier_block *notifier,
if (netdev->reg_state != NETREG_UNREGISTERING)
break;
- list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
- offloads) {
- if (offload->netdev == netdev)
- __bpf_prog_offload_destroy(offload->prog);
- }
+ down_write(&bpf_devs_lock);
+ bpf_offload_orphan_all_progs(netdev);
+ bpf_offload_orphan_all_maps(netdev);
+ up_write(&bpf_devs_lock);
break;
default:
break;
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 1712d319c2d8..8dd9210d7db7 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -38,8 +38,11 @@
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/list.h>
+#include <linux/mm.h>
#include <net/strparser.h>
#include <net/tcp.h>
+#include <linux/ptr_ring.h>
+#include <net/inet_common.h>
#define SOCK_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
@@ -47,6 +50,7 @@
struct bpf_stab {
struct bpf_map map;
struct sock **sock_map;
+ struct bpf_prog *bpf_tx_msg;
struct bpf_prog *bpf_parse;
struct bpf_prog *bpf_verdict;
};
@@ -62,8 +66,7 @@ struct smap_psock_map_entry {
struct smap_psock {
struct rcu_head rcu;
- /* refcnt is used inside sk_callback_lock */
- u32 refcnt;
+ refcount_t refcnt;
/* datapath variables */
struct sk_buff_head rxqueue;
@@ -74,7 +77,17 @@ struct smap_psock {
int save_off;
struct sk_buff *save_skb;
+ /* datapath variables for tx_msg ULP */
+ struct sock *sk_redir;
+ int apply_bytes;
+ int cork_bytes;
+ int sg_size;
+ int eval;
+ struct sk_msg_buff *cork;
+ struct list_head ingress;
+
struct strparser strp;
+ struct bpf_prog *bpf_tx_msg;
struct bpf_prog *bpf_parse;
struct bpf_prog *bpf_verdict;
struct list_head maps;
@@ -86,30 +99,939 @@ struct smap_psock {
struct work_struct tx_work;
struct work_struct gc_work;
+ struct proto *sk_proto;
+ void (*save_close)(struct sock *sk, long timeout);
void (*save_data_ready)(struct sock *sk);
void (*save_write_space)(struct sock *sk);
- void (*save_state_change)(struct sock *sk);
};
+static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
+static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int nonblock, int flags, int *addr_len);
+static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+
static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
{
return rcu_dereference_sk_user_data(sk);
}
-/* compute the linear packet data range [data, data_end) for skb when
- * sk_skb type programs are in use.
- */
-static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
+static bool bpf_tcp_stream_read(const struct sock *sk)
+{
+ struct smap_psock *psock;
+ bool empty = true;
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock))
+ goto out;
+ empty = list_empty(&psock->ingress);
+out:
+ rcu_read_unlock();
+ return !empty;
+}
+
+static struct proto tcp_bpf_proto;
+static int bpf_tcp_init(struct sock *sk)
+{
+ struct smap_psock *psock;
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ if (unlikely(psock->sk_proto)) {
+ rcu_read_unlock();
+ return -EBUSY;
+ }
+
+ psock->save_close = sk->sk_prot->close;
+ psock->sk_proto = sk->sk_prot;
+
+ if (psock->bpf_tx_msg) {
+ tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg;
+ tcp_bpf_proto.sendpage = bpf_tcp_sendpage;
+ tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg;
+ tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read;
+ }
+
+ sk->sk_prot = &tcp_bpf_proto;
+ rcu_read_unlock();
+ return 0;
+}
+
+static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
+static int free_start_sg(struct sock *sk, struct sk_msg_buff *md);
+
+static void bpf_tcp_release(struct sock *sk)
+{
+ struct smap_psock *psock;
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock))
+ goto out;
+
+ if (psock->cork) {
+ free_start_sg(psock->sock, psock->cork);
+ kfree(psock->cork);
+ psock->cork = NULL;
+ }
+
+ if (psock->sk_proto) {
+ sk->sk_prot = psock->sk_proto;
+ psock->sk_proto = NULL;
+ }
+out:
+ rcu_read_unlock();
+}
+
+static void bpf_tcp_close(struct sock *sk, long timeout)
{
- TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
+ void (*close_fun)(struct sock *sk, long timeout);
+ struct smap_psock_map_entry *e, *tmp;
+ struct sk_msg_buff *md, *mtmp;
+ struct smap_psock *psock;
+ struct sock *osk;
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ return sk->sk_prot->close(sk, timeout);
+ }
+
+ /* The psock may be destroyed anytime after exiting the RCU critical
+ * section, so by the time we use close_fun the psock may no longer
+ * be valid. However, bpf_tcp_close is called with the sock lock
+ * held, so the close hook and sk are still valid.
+ */
+ close_fun = psock->save_close;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ if (psock->cork) {
+ free_start_sg(psock->sock, psock->cork);
+ kfree(psock->cork);
+ psock->cork = NULL;
+ }
+
+ list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
+ list_del(&md->list);
+ free_start_sg(psock->sock, md);
+ kfree(md);
+ }
+
+ list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+ osk = cmpxchg(e->entry, sk, NULL);
+ if (osk == sk) {
+ list_del(&e->list);
+ smap_release_sock(psock, sk);
+ }
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+ rcu_read_unlock();
+ close_fun(sk, timeout);
}
enum __sk_action {
__SK_DROP = 0,
__SK_PASS,
__SK_REDIRECT,
+ __SK_NONE,
};
+static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
+ .name = "bpf_tcp",
+ .uid = TCP_ULP_BPF,
+ .user_visible = false,
+ .owner = NULL,
+ .init = bpf_tcp_init,
+ .release = bpf_tcp_release,
+};
+
+static int memcopy_from_iter(struct sock *sk,
+ struct sk_msg_buff *md,
+ struct iov_iter *from, int bytes)
+{
+ struct scatterlist *sg = md->sg_data;
+ int i = md->sg_curr, rc = -ENOSPC;
+
+ do {
+ int copy;
+ char *to;
+
+ if (md->sg_copybreak >= sg[i].length) {
+ md->sg_copybreak = 0;
+
+ if (++i == MAX_SKB_FRAGS)
+ i = 0;
+
+ if (i == md->sg_end)
+ break;
+ }
+
+ copy = sg[i].length - md->sg_copybreak;
+ to = sg_virt(&sg[i]) + md->sg_copybreak;
+ md->sg_copybreak += copy;
+
+ if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
+ rc = copy_from_iter_nocache(to, copy, from);
+ else
+ rc = copy_from_iter(to, copy, from);
+
+ if (rc != copy) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ bytes -= copy;
+ if (!bytes)
+ break;
+
+ md->sg_copybreak = 0;
+ if (++i == MAX_SKB_FRAGS)
+ i = 0;
+ } while (i != md->sg_end);
+out:
+ md->sg_curr = i;
+ return rc;
+}
+
+static int bpf_tcp_push(struct sock *sk, int apply_bytes,
+ struct sk_msg_buff *md,
+ int flags, bool uncharge)
+{
+ bool apply = apply_bytes;
+ struct scatterlist *sg;
+ int offset, ret = 0;
+ struct page *p;
+ size_t size;
+
+ while (1) {
+ sg = md->sg_data + md->sg_start;
+ size = (apply && apply_bytes < sg->length) ?
+ apply_bytes : sg->length;
+ offset = sg->offset;
+
+ tcp_rate_check_app_limited(sk);
+ p = sg_page(sg);
+retry:
+ ret = do_tcp_sendpages(sk, p, offset, size, flags);
+ if (ret != size) {
+ if (ret > 0) {
+ if (apply)
+ apply_bytes -= ret;
+ size -= ret;
+ offset += ret;
+ if (uncharge)
+ sk_mem_uncharge(sk, ret);
+ goto retry;
+ }
+
+ sg->length = size;
+ sg->offset = offset;
+ return ret;
+ }
+
+ if (apply)
+ apply_bytes -= ret;
+ sg->offset += ret;
+ sg->length -= ret;
+ if (uncharge)
+ sk_mem_uncharge(sk, ret);
+
+ if (!sg->length) {
+ put_page(p);
+ md->sg_start++;
+ if (md->sg_start == MAX_SKB_FRAGS)
+ md->sg_start = 0;
+ sg_init_table(sg, 1);
+
+ if (md->sg_start == md->sg_end)
+ break;
+ }
+
+ if (apply && !apply_bytes)
+ break;
+ }
+ return 0;
+}
+
+static inline void bpf_compute_data_pointers_sg(struct sk_msg_buff *md)
+{
+ struct scatterlist *sg = md->sg_data + md->sg_start;
+
+ if (md->sg_copy[md->sg_start]) {
+ md->data = md->data_end = 0;
+ } else {
+ md->data = sg_virt(sg);
+ md->data_end = md->data + sg->length;
+ }
+}
+
+static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
+{
+ struct scatterlist *sg = md->sg_data;
+ int i = md->sg_start;
+
+ do {
+ int uncharge = (bytes < sg[i].length) ? bytes : sg[i].length;
+
+ sk_mem_uncharge(sk, uncharge);
+ bytes -= uncharge;
+ if (!bytes)
+ break;
+ i++;
+ if (i == MAX_SKB_FRAGS)
+ i = 0;
+ } while (i != md->sg_end);
+}
+
+static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
+{
+ struct scatterlist *sg = md->sg_data;
+ int i = md->sg_start, free;
+
+ while (bytes && sg[i].length) {
+ free = sg[i].length;
+ if (bytes < free) {
+ sg[i].length -= bytes;
+ sg[i].offset += bytes;
+ sk_mem_uncharge(sk, bytes);
+ break;
+ }
+
+ sk_mem_uncharge(sk, sg[i].length);
+ put_page(sg_page(&sg[i]));
+ bytes -= sg[i].length;
+ sg[i].length = 0;
+ sg[i].page_link = 0;
+ sg[i].offset = 0;
+ i++;
+
+ if (i == MAX_SKB_FRAGS)
+ i = 0;
+ }
+}
+
+static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
+{
+ struct scatterlist *sg = md->sg_data;
+ int i = start, free = 0;
+
+ while (sg[i].length) {
+ free += sg[i].length;
+ sk_mem_uncharge(sk, sg[i].length);
+ put_page(sg_page(&sg[i]));
+ sg[i].length = 0;
+ sg[i].page_link = 0;
+ sg[i].offset = 0;
+ i++;
+
+ if (i == MAX_SKB_FRAGS)
+ i = 0;
+ }
+
+ return free;
+}
+
+static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
+{
+ int free = free_sg(sk, md->sg_start, md);
+
+ md->sg_start = md->sg_end;
+ return free;
+}
+
+static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
+{
+ return free_sg(sk, md->sg_curr, md);
+}
+
+static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
+{
+ return ((_rc == SK_PASS) ?
+ (md->map ? __SK_REDIRECT : __SK_PASS) :
+ __SK_DROP);
+}
+
+static unsigned int smap_do_tx_msg(struct sock *sk,
+ struct smap_psock *psock,
+ struct sk_msg_buff *md)
+{
+ struct bpf_prog *prog;
+ unsigned int rc, _rc;
+
+ preempt_disable();
+ rcu_read_lock();
+
+ /* If the policy was removed mid-send then default to 'accept' */
+ prog = READ_ONCE(psock->bpf_tx_msg);
+ if (unlikely(!prog)) {
+ _rc = SK_PASS;
+ goto verdict;
+ }
+
+ bpf_compute_data_pointers_sg(md);
+ rc = (*prog->bpf_func)(md, prog->insnsi);
+ psock->apply_bytes = md->apply_bytes;
+
+ /* Moving return codes from UAPI namespace into internal namespace */
+ _rc = bpf_map_msg_verdict(rc, md);
+
+ /* The psock has a refcount on the sock but not on the map and,
+ * because we need to drop the RCU read lock here, it's possible the
+ * map could be removed between here and when we need it to execute
+ * the sock redirect. So do the map lookup now for future use.
+ */
+ if (_rc == __SK_REDIRECT) {
+ if (psock->sk_redir)
+ sock_put(psock->sk_redir);
+ psock->sk_redir = do_msg_redirect_map(md);
+ if (!psock->sk_redir) {
+ _rc = __SK_DROP;
+ goto verdict;
+ }
+ sock_hold(psock->sk_redir);
+ }
+verdict:
+ rcu_read_unlock();
+ preempt_enable();
+
+ return _rc;
+}
+
+static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
+ struct smap_psock *psock,
+ struct sk_msg_buff *md, int flags)
+{
+ bool apply = apply_bytes;
+ size_t size, copied = 0;
+ struct sk_msg_buff *r;
+ int err = 0, i;
+
+ r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_KERNEL);
+ if (unlikely(!r))
+ return -ENOMEM;
+
+ lock_sock(sk);
+ r->sg_start = md->sg_start;
+ i = md->sg_start;
+
+ do {
+ r->sg_data[i] = md->sg_data[i];
+
+ size = (apply && apply_bytes < md->sg_data[i].length) ?
+ apply_bytes : md->sg_data[i].length;
+
+ if (!sk_wmem_schedule(sk, size)) {
+ if (!copied)
+ err = -ENOMEM;
+ break;
+ }
+
+ sk_mem_charge(sk, size);
+ r->sg_data[i].length = size;
+ md->sg_data[i].length -= size;
+ md->sg_data[i].offset += size;
+ copied += size;
+
+ if (md->sg_data[i].length) {
+ get_page(sg_page(&r->sg_data[i]));
+ r->sg_end = (i + 1) == MAX_SKB_FRAGS ? 0 : i + 1;
+ } else {
+ i++;
+ if (i == MAX_SKB_FRAGS)
+ i = 0;
+ r->sg_end = i;
+ }
+
+ if (apply) {
+ apply_bytes -= size;
+ if (!apply_bytes)
+ break;
+ }
+ } while (i != md->sg_end);
+
+ md->sg_start = i;
+
+ if (!err) {
+ list_add_tail(&r->list, &psock->ingress);
+ sk->sk_data_ready(sk);
+ } else {
+ free_start_sg(sk, r);
+ kfree(r);
+ }
+
+ release_sock(sk);
+ return err;
+}
+
+static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
+ struct sk_msg_buff *md,
+ int flags)
+{
+ struct smap_psock *psock;
+ struct scatterlist *sg;
+ int i, err, free = 0;
+ bool ingress = !!(md->flags & BPF_F_INGRESS);
+
+ sg = md->sg_data;
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock))
+ goto out_rcu;
+
+ if (!refcount_inc_not_zero(&psock->refcnt))
+ goto out_rcu;
+
+ rcu_read_unlock();
+
+ if (ingress) {
+ err = bpf_tcp_ingress(sk, send, psock, md, flags);
+ } else {
+ lock_sock(sk);
+ err = bpf_tcp_push(sk, send, md, flags, false);
+ release_sock(sk);
+ }
+ smap_release_sock(psock, sk);
+ if (unlikely(err))
+ goto out;
+ return 0;
+out_rcu:
+ rcu_read_unlock();
+out:
+ i = md->sg_start;
+ while (sg[i].length) {
+ free += sg[i].length;
+ put_page(sg_page(&sg[i]));
+ sg[i].length = 0;
+ i++;
+ if (i == MAX_SKB_FRAGS)
+ i = 0;
+ }
+ return free;
+}
+
+static inline void bpf_md_init(struct smap_psock *psock)
+{
+ if (!psock->apply_bytes) {
+ psock->eval = __SK_NONE;
+ if (psock->sk_redir) {
+ sock_put(psock->sk_redir);
+ psock->sk_redir = NULL;
+ }
+ }
+}
+
+static void apply_bytes_dec(struct smap_psock *psock, int i)
+{
+ if (psock->apply_bytes) {
+ if (psock->apply_bytes < i)
+ psock->apply_bytes = 0;
+ else
+ psock->apply_bytes -= i;
+ }
+}
+
+static int bpf_exec_tx_verdict(struct smap_psock *psock,
+ struct sk_msg_buff *m,
+ struct sock *sk,
+ int *copied, int flags)
+{
+ bool cork = false, enospc = (m->sg_start == m->sg_end);
+ struct sock *redir;
+ int err = 0;
+ int send;
+
+more_data:
+ if (psock->eval == __SK_NONE)
+ psock->eval = smap_do_tx_msg(sk, psock, m);
+
+ if (m->cork_bytes &&
+ m->cork_bytes > psock->sg_size && !enospc) {
+ psock->cork_bytes = m->cork_bytes - psock->sg_size;
+ if (!psock->cork) {
+ psock->cork = kcalloc(1,
+ sizeof(struct sk_msg_buff),
+ GFP_ATOMIC | __GFP_NOWARN);
+
+ if (!psock->cork) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ }
+ memcpy(psock->cork, m, sizeof(*m));
+ goto out_err;
+ }
+
+ send = psock->sg_size;
+ if (psock->apply_bytes && psock->apply_bytes < send)
+ send = psock->apply_bytes;
+
+ switch (psock->eval) {
+ case __SK_PASS:
+ err = bpf_tcp_push(sk, send, m, flags, true);
+ if (unlikely(err)) {
+ *copied -= free_start_sg(sk, m);
+ break;
+ }
+
+ apply_bytes_dec(psock, send);
+ psock->sg_size -= send;
+ break;
+ case __SK_REDIRECT:
+ redir = psock->sk_redir;
+ apply_bytes_dec(psock, send);
+
+ if (psock->cork) {
+ cork = true;
+ psock->cork = NULL;
+ }
+
+ return_mem_sg(sk, send, m);
+ release_sock(sk);
+
+ err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags);
+ lock_sock(sk);
+
+ if (cork) {
+ free_start_sg(sk, m);
+ kfree(m);
+ m = NULL;
+ }
+ if (unlikely(err))
+ *copied -= err;
+ else
+ psock->sg_size -= send;
+ break;
+ case __SK_DROP:
+ default:
+ free_bytes_sg(sk, send, m);
+ apply_bytes_dec(psock, send);
+ *copied -= send;
+ psock->sg_size -= send;
+ err = -EACCES;
+ break;
+ }
+
+ if (likely(!err)) {
+ bpf_md_init(psock);
+ if (m &&
+ m->sg_data[m->sg_start].page_link &&
+ m->sg_data[m->sg_start].length)
+ goto more_data;
+ }
+
+out_err:
+ return err;
+}
+
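On the BPF side, the apply/cork accounting handled by bpf_exec_tx_verdict() above is driven by helpers added elsewhere in this series. A hedged sketch of an SK_MSG program exercising them; the helper names bpf_msg_apply_bytes(), bpf_msg_cork_bytes() and bpf_msg_redirect_map() are assumed from the companion patches, and sock_map/redir_key are illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 2);
	__type(key, int);
	__type(value, int);
} sock_map SEC(".maps");

SEC("sk_msg")
int msg_prog(struct sk_msg_md *msg)
{
	int redir_key = 0;

	/* Run the verdict once per 4k of data and cork until 1k is queued */
	bpf_msg_apply_bytes(msg, 4096);
	bpf_msg_cork_bytes(msg, 1024);

	/* Redirect into the peer socket's ingress queue */
	return bpf_msg_redirect_map(msg, &sock_map, redir_key, BPF_F_INGRESS);
}

char _license[] SEC("license") = "GPL";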
+static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int nonblock, int flags, int *addr_len)
+{
+ struct iov_iter *iter = &msg->msg_iter;
+ struct smap_psock *psock;
+ int copied = 0;
+
+ if (unlikely(flags & MSG_ERRQUEUE))
+ return inet_recv_error(sk, msg, len, addr_len);
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock))
+ goto out;
+
+ if (unlikely(!refcount_inc_not_zero(&psock->refcnt)))
+ goto out;
+ rcu_read_unlock();
+
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+
+ lock_sock(sk);
+ while (copied != len) {
+ struct scatterlist *sg;
+ struct sk_msg_buff *md;
+ int i;
+
+ md = list_first_entry_or_null(&psock->ingress,
+ struct sk_msg_buff, list);
+ if (unlikely(!md))
+ break;
+ i = md->sg_start;
+ do {
+ struct page *page;
+ int n, copy;
+
+ sg = &md->sg_data[i];
+ copy = sg->length;
+ page = sg_page(sg);
+
+ if (copied + copy > len)
+ copy = len - copied;
+
+ n = copy_page_to_iter(page, sg->offset, copy, iter);
+ if (n != copy) {
+ md->sg_start = i;
+ release_sock(sk);
+ smap_release_sock(psock, sk);
+ return -EFAULT;
+ }
+
+ copied += copy;
+ sg->offset += copy;
+ sg->length -= copy;
+ sk_mem_uncharge(sk, copy);
+
+ if (!sg->length) {
+ i++;
+ if (i == MAX_SKB_FRAGS)
+ i = 0;
+ if (!md->skb)
+ put_page(page);
+ }
+ if (copied == len)
+ break;
+ } while (i != md->sg_end);
+ md->sg_start = i;
+
+ if (!sg->length && md->sg_start == md->sg_end) {
+ list_del(&md->list);
+ if (md->skb)
+ consume_skb(md->skb);
+ kfree(md);
+ }
+ }
+
+ release_sock(sk);
+ smap_release_sock(psock, sk);
+ return copied;
+out:
+ rcu_read_unlock();
+ return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+}
+
+static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+ int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
+ struct sk_msg_buff md = {0};
+ unsigned int sg_copy = 0;
+ struct smap_psock *psock;
+ int copied = 0, err = 0;
+ struct scatterlist *sg;
+ long timeo;
+
+ /* It's possible a sock event or the user removed the psock _but_ the
+ * ops have not been reprogrammed yet, so we get here. In this case
+ * fall back to tcp_sendmsg. Note this only works because we _only_
+ * ever allow a single ULP; there is no hierarchy here.
+ */
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ return tcp_sendmsg(sk, msg, size);
+ }
+
+ /* Increment the psock refcnt to ensure it's not released while
+ * sending a message. Required because the sk lookup and BPF programs
+ * run in separate RCU critical sections. It's OK if we lose the map
+ * entry, but we can't lose the sock reference.
+ */
+ if (!refcount_inc_not_zero(&psock->refcnt)) {
+ rcu_read_unlock();
+ return tcp_sendmsg(sk, msg, size);
+ }
+
+ sg = md.sg_data;
+ sg_init_marker(sg, MAX_SKB_FRAGS);
+ rcu_read_unlock();
+
+ lock_sock(sk);
+ timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+
+ while (msg_data_left(msg)) {
+ struct sk_msg_buff *m;
+ bool enospc = false;
+ int copy;
+
+ if (sk->sk_err) {
+ err = sk->sk_err;
+ goto out_err;
+ }
+
+ copy = msg_data_left(msg);
+ if (!sk_stream_memory_free(sk))
+ goto wait_for_sndbuf;
+
+ m = psock->cork_bytes ? psock->cork : &md;
+ m->sg_curr = m->sg_copybreak ? m->sg_curr : m->sg_end;
+ err = sk_alloc_sg(sk, copy, m->sg_data,
+ m->sg_start, &m->sg_end, &sg_copy,
+ m->sg_end - 1);
+ if (err) {
+ if (err != -ENOSPC)
+ goto wait_for_memory;
+ enospc = true;
+ copy = sg_copy;
+ }
+
+ err = memcopy_from_iter(sk, m, &msg->msg_iter, copy);
+ if (err < 0) {
+ free_curr_sg(sk, m);
+ goto out_err;
+ }
+
+ psock->sg_size += copy;
+ copied += copy;
+ sg_copy = 0;
+
+ /* When bytes are being corked, skip running the BPF program and
+ * applying the verdict unless there is no more buffer space. In
+ * the ENOSPC case simply run the BPF program with the currently
+ * accumulated data. We don't have much choice at this point; we
+ * could try extending the page frags or chaining complex frags,
+ * but even in these cases we will _eventually_ hit an OOM
+ * scenario. More complex recovery schemes may be implemented in
+ * the future, but BPF programs must handle the case where
+ * apply_cork requests are not honored. The canonical method to
+ * verify this is to check data length.
+ */
+ if (psock->cork_bytes) {
+ if (copy > psock->cork_bytes)
+ psock->cork_bytes = 0;
+ else
+ psock->cork_bytes -= copy;
+
+ if (psock->cork_bytes && !enospc)
+ goto out_cork;
+
+ /* All cork bytes accounted for; re-run the filter */
+ psock->eval = __SK_NONE;
+ psock->cork_bytes = 0;
+ }
+
+ err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
+ if (unlikely(err < 0))
+ goto out_err;
+ continue;
+wait_for_sndbuf:
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+wait_for_memory:
+ err = sk_stream_wait_memory(sk, &timeo);
+ if (err)
+ goto out_err;
+ }
+out_err:
+ if (err < 0)
+ err = sk_stream_error(sk, msg->msg_flags, err);
+out_cork:
+ release_sock(sk);
+ smap_release_sock(psock, sk);
+ return copied ? copied : err;
+}
+
+static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
+{
+ struct sk_msg_buff md = {0}, *m = NULL;
+ int err = 0, copied = 0;
+ struct smap_psock *psock;
+ struct scatterlist *sg;
+ bool enospc = false;
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock))
+ goto accept;
+
+ if (!refcount_inc_not_zero(&psock->refcnt))
+ goto accept;
+ rcu_read_unlock();
+
+ lock_sock(sk);
+
+ if (psock->cork_bytes) {
+ m = psock->cork;
+ sg = &m->sg_data[m->sg_end];
+ } else {
+ m = &md;
+ sg = m->sg_data;
+ sg_init_marker(sg, MAX_SKB_FRAGS);
+ }
+
+ /* Catch case where ring is full and sendpage is stalled. */
+ if (unlikely(m->sg_end == m->sg_start &&
+ m->sg_data[m->sg_end].length))
+ goto out_err;
+
+ psock->sg_size += size;
+ sg_set_page(sg, page, size, offset);
+ get_page(page);
+ m->sg_copy[m->sg_end] = true;
+ sk_mem_charge(sk, size);
+ m->sg_end++;
+ copied = size;
+
+ if (m->sg_end == MAX_SKB_FRAGS)
+ m->sg_end = 0;
+
+ if (m->sg_end == m->sg_start)
+ enospc = true;
+
+ if (psock->cork_bytes) {
+ if (size > psock->cork_bytes)
+ psock->cork_bytes = 0;
+ else
+ psock->cork_bytes -= size;
+
+ if (psock->cork_bytes && !enospc)
+ goto out_err;
+
+ /* All cork bytes accounted for; re-run the filter */
+ psock->eval = __SK_NONE;
+ psock->cork_bytes = 0;
+ }
+
+ err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
+out_err:
+ release_sock(sk);
+ smap_release_sock(psock, sk);
+ return copied ? copied : err;
+accept:
+ rcu_read_unlock();
+ return tcp_sendpage(sk, page, offset, size, flags);
+}
+
+static void bpf_tcp_msg_add(struct smap_psock *psock,
+ struct sock *sk,
+ struct bpf_prog *tx_msg)
+{
+ struct bpf_prog *orig_tx_msg;
+
+ orig_tx_msg = xchg(&psock->bpf_tx_msg, tx_msg);
+ if (orig_tx_msg)
+ bpf_prog_put(orig_tx_msg);
+}
+
+static int bpf_tcp_ulp_register(void)
+{
+ tcp_bpf_proto = tcp_prot;
+ tcp_bpf_proto.close = bpf_tcp_close;
+ /* Once the BPF TX ULP is registered it is never unregistered. It
+ * will remain in the ULP list for the lifetime of the system.
+ * Duplicate registrations are not a problem.
+ */
+ return tcp_register_ulp(&bpf_tcp_ulp_ops);
+}
+
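From userspace, the msg verdict program is wired to a sockmap with the usual attach call. A minimal sketch assuming libbpf's bpf_prog_attach() (prog_fd/map_fd obtained elsewhere), matching the BPF_SK_MSG_VERDICT case added to sock_map_prog() later in this patch:

#include <bpf/bpf.h>

/* Attach an SK_MSG verdict program to a sockmap; the program then runs on
 * sendmsg()/sendpage() for every socket subsequently added to the map.
 */
static int attach_msg_verdict(int prog_fd, int map_fd)
{
	return bpf_prog_attach(prog_fd, map_fd, BPF_SK_MSG_VERDICT, 0);
}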
static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
@@ -137,27 +1059,72 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
__SK_DROP;
}
+static int smap_do_ingress(struct smap_psock *psock, struct sk_buff *skb)
+{
+ struct sock *sk = psock->sock;
+ int copied = 0, num_sg;
+ struct sk_msg_buff *r;
+
+ r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_ATOMIC);
+ if (unlikely(!r))
+ return -EAGAIN;
+
+ if (!sk_rmem_schedule(sk, skb, skb->len)) {
+ kfree(r);
+ return -EAGAIN;
+ }
+
+ sg_init_table(r->sg_data, MAX_SKB_FRAGS);
+ num_sg = skb_to_sgvec(skb, r->sg_data, 0, skb->len);
+ if (unlikely(num_sg < 0)) {
+ kfree(r);
+ return num_sg;
+ }
+ sk_mem_charge(sk, skb->len);
+ copied = skb->len;
+ r->sg_start = 0;
+ r->sg_end = num_sg == MAX_SKB_FRAGS ? 0 : num_sg;
+ r->skb = skb;
+ list_add_tail(&r->list, &psock->ingress);
+ sk->sk_data_ready(sk);
+ return copied;
+}
+
static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
{
+ struct smap_psock *peer;
struct sock *sk;
+ __u32 in;
int rc;
rc = smap_verdict_func(psock, skb);
switch (rc) {
case __SK_REDIRECT:
sk = do_sk_redirect_map(skb);
- if (likely(sk)) {
- struct smap_psock *peer = smap_psock_sk(sk);
-
- if (likely(peer &&
- test_bit(SMAP_TX_RUNNING, &peer->state) &&
- !sock_flag(sk, SOCK_DEAD) &&
- sock_writeable(sk))) {
- skb_set_owner_w(skb, sk);
- skb_queue_tail(&peer->rxqueue, skb);
- schedule_work(&peer->tx_work);
- break;
- }
+ if (!sk) {
+ kfree_skb(skb);
+ break;
+ }
+
+ peer = smap_psock_sk(sk);
+ in = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
+
+ if (unlikely(!peer || sock_flag(sk, SOCK_DEAD) ||
+ !test_bit(SMAP_TX_RUNNING, &peer->state))) {
+ kfree_skb(skb);
+ break;
+ }
+
+ if (!in && sock_writeable(sk)) {
+ skb_set_owner_w(skb, sk);
+ skb_queue_tail(&peer->rxqueue, skb);
+ schedule_work(&peer->tx_work);
+ break;
+ } else if (in &&
+ atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
+ skb_queue_tail(&peer->rxqueue, skb);
+ schedule_work(&peer->tx_work);
+ break;
}
/* Fall through and free skb otherwise */
case __SK_DROP:
@@ -174,68 +1141,6 @@ static void smap_report_sk_error(struct smap_psock *psock, int err)
sk->sk_error_report(sk);
}
-static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
-
-/* Called with lock_sock(sk) held */
-static void smap_state_change(struct sock *sk)
-{
- struct smap_psock_map_entry *e, *tmp;
- struct smap_psock *psock;
- struct socket_wq *wq;
- struct sock *osk;
-
- rcu_read_lock();
-
- /* Allowing transitions into an established syn_recv states allows
- * for early binding sockets to a smap object before the connection
- * is established.
- */
- switch (sk->sk_state) {
- case TCP_SYN_SENT:
- case TCP_SYN_RECV:
- case TCP_ESTABLISHED:
- break;
- case TCP_CLOSE_WAIT:
- case TCP_CLOSING:
- case TCP_LAST_ACK:
- case TCP_FIN_WAIT1:
- case TCP_FIN_WAIT2:
- case TCP_LISTEN:
- break;
- case TCP_CLOSE:
- /* Only release if the map entry is in fact the sock in
- * question. There is a case where the operator deletes
- * the sock from the map, but the TCP sock is closed before
- * the psock is detached. Use cmpxchg to verify correct
- * sock is removed.
- */
- psock = smap_psock_sk(sk);
- if (unlikely(!psock))
- break;
- write_lock_bh(&sk->sk_callback_lock);
- list_for_each_entry_safe(e, tmp, &psock->maps, list) {
- osk = cmpxchg(e->entry, sk, NULL);
- if (osk == sk) {
- list_del(&e->list);
- smap_release_sock(psock, sk);
- }
- }
- write_unlock_bh(&sk->sk_callback_lock);
- break;
- default:
- psock = smap_psock_sk(sk);
- if (unlikely(!psock))
- break;
- smap_report_sk_error(psock, EPIPE);
- break;
- }
-
- wq = rcu_dereference(sk->sk_wq);
- if (skwq_has_sleeper(wq))
- wake_up_interruptible_all(&wq->wait);
- rcu_read_unlock();
-}
-
static void smap_read_sock_strparser(struct strparser *strp,
struct sk_buff *skb)
{
@@ -281,15 +1186,23 @@ static void smap_tx_work(struct work_struct *w)
}
while ((skb = skb_dequeue(&psock->rxqueue))) {
+ __u32 flags;
+
rem = skb->len;
off = 0;
start:
+ flags = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
do {
- if (likely(psock->sock->sk_socket))
- n = skb_send_sock_locked(psock->sock,
- skb, off, rem);
- else
+ if (likely(psock->sock->sk_socket)) {
+ if (flags)
+ n = smap_do_ingress(psock, skb);
+ else
+ n = skb_send_sock_locked(psock->sock,
+ skb, off, rem);
+ } else {
n = -EINVAL;
+ }
+
if (n <= 0) {
if (n == -EAGAIN) {
/* Retry when space is available */
@@ -307,7 +1220,9 @@ start:
rem -= n;
off += n;
} while (rem);
- kfree_skb(skb);
+
+ if (!flags)
+ kfree_skb(skb);
}
out:
release_sock(psock->sock);
@@ -330,10 +1245,8 @@ static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
return;
sk->sk_data_ready = psock->save_data_ready;
sk->sk_write_space = psock->save_write_space;
- sk->sk_state_change = psock->save_state_change;
psock->save_data_ready = NULL;
psock->save_write_space = NULL;
- psock->save_state_change = NULL;
strp_stop(&psock->strp);
psock->strp_enabled = false;
}
@@ -354,14 +1267,13 @@ static void smap_destroy_psock(struct rcu_head *rcu)
static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
{
- psock->refcnt--;
- if (psock->refcnt)
- return;
-
- smap_stop_sock(psock, sock);
- clear_bit(SMAP_TX_RUNNING, &psock->state);
- rcu_assign_sk_user_data(sock, NULL);
- call_rcu_sched(&psock->rcu, smap_destroy_psock);
+ if (refcount_dec_and_test(&psock->refcnt)) {
+ tcp_cleanup_ulp(sock);
+ smap_stop_sock(psock, sock);
+ clear_bit(SMAP_TX_RUNNING, &psock->state);
+ rcu_assign_sk_user_data(sock, NULL);
+ call_rcu_sched(&psock->rcu, smap_destroy_psock);
+ }
}
static int smap_parse_func_strparser(struct strparser *strp,
@@ -395,7 +1307,6 @@ static int smap_parse_func_strparser(struct strparser *strp,
return rc;
}
-
static int smap_read_sock_done(struct strparser *strp, int err)
{
return err;
@@ -435,10 +1346,8 @@ static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
return;
psock->save_data_ready = sk->sk_data_ready;
psock->save_write_space = sk->sk_write_space;
- psock->save_state_change = sk->sk_state_change;
sk->sk_data_ready = smap_data_ready;
sk->sk_write_space = smap_write_space;
- sk->sk_state_change = smap_state_change;
psock->strp_enabled = true;
}
@@ -451,6 +1360,7 @@ static void sock_map_remove_complete(struct bpf_stab *stab)
static void smap_gc_work(struct work_struct *w)
{
struct smap_psock_map_entry *e, *tmp;
+ struct sk_msg_buff *md, *mtmp;
struct smap_psock *psock;
psock = container_of(w, struct smap_psock, gc_work);
@@ -467,12 +1377,28 @@ static void smap_gc_work(struct work_struct *w)
bpf_prog_put(psock->bpf_parse);
if (psock->bpf_verdict)
bpf_prog_put(psock->bpf_verdict);
+ if (psock->bpf_tx_msg)
+ bpf_prog_put(psock->bpf_tx_msg);
+
+ if (psock->cork) {
+ free_start_sg(psock->sock, psock->cork);
+ kfree(psock->cork);
+ }
+
+ list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
+ list_del(&md->list);
+ free_start_sg(psock->sock, md);
+ kfree(md);
+ }
list_for_each_entry_safe(e, tmp, &psock->maps, list) {
list_del(&e->list);
kfree(e);
}
+ if (psock->sk_redir)
+ sock_put(psock->sk_redir);
+
sock_put(psock->sock);
kfree(psock);
}
@@ -488,12 +1414,14 @@ static struct smap_psock *smap_init_psock(struct sock *sock,
if (!psock)
return ERR_PTR(-ENOMEM);
+ psock->eval = __SK_NONE;
psock->sock = sock;
skb_queue_head_init(&psock->rxqueue);
INIT_WORK(&psock->tx_work, smap_tx_work);
INIT_WORK(&psock->gc_work, smap_gc_work);
INIT_LIST_HEAD(&psock->maps);
- psock->refcnt = 1;
+ INIT_LIST_HEAD(&psock->ingress);
+ refcount_set(&psock->refcnt, 1);
rcu_assign_sk_user_data(sock, psock);
sock_hold(sock);
@@ -503,8 +1431,8 @@ static struct smap_psock *smap_init_psock(struct sock *sock,
static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
struct bpf_stab *stab;
- int err = -EINVAL;
u64 cost;
+ int err;
if (!capable(CAP_NET_ADMIN))
return ERR_PTR(-EPERM);
@@ -517,20 +1445,19 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
if (attr->value_size > KMALLOC_MAX_SIZE)
return ERR_PTR(-E2BIG);
+ err = bpf_tcp_ulp_register();
+ if (err && err != -EEXIST)
+ return ERR_PTR(err);
+
stab = kzalloc(sizeof(*stab), GFP_USER);
if (!stab)
return ERR_PTR(-ENOMEM);
- /* mandatory map attributes */
- stab->map.map_type = attr->map_type;
- stab->map.key_size = attr->key_size;
- stab->map.value_size = attr->value_size;
- stab->map.max_entries = attr->max_entries;
- stab->map.map_flags = attr->map_flags;
- stab->map.numa_node = bpf_map_attr_numa_node(attr);
+ bpf_map_init_from_attr(&stab->map, attr);
/* make sure page count doesn't overflow */
cost = (u64) stab->map.max_entries * sizeof(struct sock *);
+ err = -EINVAL;
if (cost >= U32_MAX - PAGE_SIZE)
goto free_stab;
@@ -604,11 +1531,6 @@ static void sock_map_free(struct bpf_map *map)
}
rcu_read_unlock();
- if (stab->bpf_verdict)
- bpf_prog_put(stab->bpf_verdict);
- if (stab->bpf_parse)
- bpf_prog_put(stab->bpf_parse);
-
sock_map_remove_complete(stab);
}
@@ -702,10 +1624,11 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
{
struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
struct smap_psock_map_entry *e = NULL;
- struct bpf_prog *verdict, *parse;
+ struct bpf_prog *verdict, *parse, *tx_msg;
struct sock *osock, *sock;
struct smap_psock *psock;
u32 i = *(u32 *)key;
+ bool new = false;
int err;
if (unlikely(flags > BPF_EXIST))
@@ -728,6 +1651,7 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
*/
verdict = READ_ONCE(stab->bpf_verdict);
parse = READ_ONCE(stab->bpf_parse);
+ tx_msg = READ_ONCE(stab->bpf_tx_msg);
if (parse && verdict) {
/* bpf prog refcnt may be zero if a concurrent attach operation
@@ -746,6 +1670,17 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
}
}
+ if (tx_msg) {
+ tx_msg = bpf_prog_inc_not_zero(stab->bpf_tx_msg);
+ if (IS_ERR(tx_msg)) {
+ if (verdict)
+ bpf_prog_put(verdict);
+ if (parse)
+ bpf_prog_put(parse);
+ return PTR_ERR(tx_msg);
+ }
+ }
+
write_lock_bh(&sock->sk_callback_lock);
psock = smap_psock_sk(sock);
@@ -760,7 +1695,14 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
err = -EBUSY;
goto out_progs;
}
- psock->refcnt++;
+ if (READ_ONCE(psock->bpf_tx_msg) && tx_msg) {
+ err = -EBUSY;
+ goto out_progs;
+ }
+ if (!refcount_inc_not_zero(&psock->refcnt)) {
+ err = -EAGAIN;
+ goto out_progs;
+ }
} else {
psock = smap_init_psock(sock, stab);
if (IS_ERR(psock)) {
@@ -769,6 +1711,7 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
}
set_bit(SMAP_TX_RUNNING, &psock->state);
+ new = true;
}
e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
@@ -781,6 +1724,14 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
/* 3. At this point we have a reference to a valid psock that is
* running. Attach any BPF programs needed.
*/
+ if (tx_msg)
+ bpf_tcp_msg_add(psock, sock, tx_msg);
+ if (new) {
+ err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
+ if (err)
+ goto out_free;
+ }
+
if (parse && verdict && !psock->strp_enabled) {
err = smap_init_sock(psock, sock);
if (err)
@@ -802,8 +1753,6 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
struct smap_psock *opsock = smap_psock_sk(osock);
write_lock_bh(&osock->sk_callback_lock);
- if (osock != sock && parse)
- smap_stop_sock(opsock, osock);
smap_list_remove(opsock, &stab->sock_map[i]);
smap_release_sock(opsock, osock);
write_unlock_bh(&osock->sk_callback_lock);
@@ -816,6 +1765,8 @@ out_progs:
bpf_prog_put(verdict);
if (parse)
bpf_prog_put(parse);
+ if (tx_msg)
+ bpf_prog_put(tx_msg);
write_unlock_bh(&sock->sk_callback_lock);
kfree(e);
return err;
@@ -830,6 +1781,9 @@ int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
return -EINVAL;
switch (type) {
+ case BPF_SK_MSG_VERDICT:
+ orig = xchg(&stab->bpf_tx_msg, prog);
+ break;
case BPF_SK_SKB_STREAM_PARSER:
orig = xchg(&stab->bpf_parse, prog);
break;
@@ -880,6 +1834,23 @@ static int sock_map_update_elem(struct bpf_map *map,
return err;
}
+static void sock_map_release(struct bpf_map *map, struct file *map_file)
+{
+ struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
+ struct bpf_prog *orig;
+
+ orig = xchg(&stab->bpf_parse, NULL);
+ if (orig)
+ bpf_prog_put(orig);
+ orig = xchg(&stab->bpf_verdict, NULL);
+ if (orig)
+ bpf_prog_put(orig);
+
+ orig = xchg(&stab->bpf_tx_msg, NULL);
+ if (orig)
+ bpf_prog_put(orig);
+}
+
const struct bpf_map_ops sock_map_ops = {
.map_alloc = sock_map_alloc,
.map_free = sock_map_free,
@@ -887,6 +1858,7 @@ const struct bpf_map_ops sock_map_ops = {
.map_get_next_key = sock_map_get_next_key,
.map_update_elem = sock_map_update_elem,
.map_delete_elem = sock_map_delete_elem,
+ .map_release = sock_map_release,
};
BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index a15bc636cc98..57eeb1234b67 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -9,16 +9,19 @@
#include <linux/filter.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
+#include <linux/elf.h>
+#include <linux/pagemap.h>
#include "percpu_freelist.h"
-#define STACK_CREATE_FLAG_MASK \
- (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
+#define STACK_CREATE_FLAG_MASK \
+ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | \
+ BPF_F_STACK_BUILD_ID)
struct stack_map_bucket {
struct pcpu_freelist_node fnode;
u32 hash;
u32 nr;
- u64 ip[];
+ u64 data[];
};
struct bpf_stack_map {
@@ -29,6 +32,17 @@ struct bpf_stack_map {
struct stack_map_bucket *buckets[];
};
+static inline bool stack_map_use_build_id(struct bpf_map *map)
+{
+ return (map->map_flags & BPF_F_STACK_BUILD_ID);
+}
+
+static inline int stack_map_data_size(struct bpf_map *map)
+{
+ return stack_map_use_build_id(map) ?
+ sizeof(struct bpf_stack_build_id) : sizeof(u64);
+}
+
static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
@@ -68,8 +82,16 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
- value_size < 8 || value_size % 8 ||
- value_size / 8 > sysctl_perf_event_max_stack)
+ value_size < 8 || value_size % 8)
+ return ERR_PTR(-EINVAL);
+
+ BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
+ if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
+ if (value_size % sizeof(struct bpf_stack_build_id) ||
+ value_size / sizeof(struct bpf_stack_build_id)
+ > sysctl_perf_event_max_stack)
+ return ERR_PTR(-EINVAL);
+ } else if (value_size / 8 > sysctl_perf_event_max_stack)
return ERR_PTR(-EINVAL);
/* hash table size must be power of 2 */
@@ -88,14 +110,10 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
if (cost >= U32_MAX - PAGE_SIZE)
goto free_smap;
- smap->map.map_type = attr->map_type;
- smap->map.key_size = attr->key_size;
+ bpf_map_init_from_attr(&smap->map, attr);
smap->map.value_size = value_size;
- smap->map.max_entries = attr->max_entries;
- smap->map.map_flags = attr->map_flags;
smap->n_buckets = n_buckets;
smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
- smap->map.numa_node = bpf_map_attr_numa_node(attr);
err = bpf_map_precharge_memlock(smap->map.pages);
if (err)
@@ -118,13 +136,184 @@ free_smap:
return ERR_PTR(err);
}
+#define BPF_BUILD_ID 3
+/*
+ * Parse the build ID from the note segment. This logic can be shared
+ * between 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
+ * identical.
+ */
+static inline int stack_map_parse_build_id(void *page_addr,
+ unsigned char *build_id,
+ void *note_start,
+ Elf32_Word note_size)
+{
+ Elf32_Word note_offs = 0, new_offs;
+
+ /* check for overflow */
+ if (note_start < page_addr || note_start + note_size < note_start)
+ return -EINVAL;
+
+ /* only supports note that fits in the first page */
+ if (note_start + note_size > page_addr + PAGE_SIZE)
+ return -EINVAL;
+
+ while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
+ Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
+
+ if (nhdr->n_type == BPF_BUILD_ID &&
+ nhdr->n_namesz == sizeof("GNU") &&
+ nhdr->n_descsz == BPF_BUILD_ID_SIZE) {
+ memcpy(build_id,
+ note_start + note_offs +
+ ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
+ BPF_BUILD_ID_SIZE);
+ return 0;
+ }
+ new_offs = note_offs + sizeof(Elf32_Nhdr) +
+ ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
+ if (new_offs <= note_offs) /* overflow */
+ break;
+ note_offs = new_offs;
+ }
+ return -EINVAL;
+}
+
+/* Parse build ID from 32-bit ELF */
+static int stack_map_get_build_id_32(void *page_addr,
+ unsigned char *build_id)
+{
+ Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
+ Elf32_Phdr *phdr;
+ int i;
+
+ /* only supports phdr that fits in one page */
+ if (ehdr->e_phnum >
+ (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
+ return -EINVAL;
+
+ phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
+
+ for (i = 0; i < ehdr->e_phnum; ++i)
+ if (phdr[i].p_type == PT_NOTE)
+ return stack_map_parse_build_id(page_addr, build_id,
+ page_addr + phdr[i].p_offset,
+ phdr[i].p_filesz);
+ return -EINVAL;
+}
+
+/* Parse build ID from 64-bit ELF */
+static int stack_map_get_build_id_64(void *page_addr,
+ unsigned char *build_id)
+{
+ Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
+ Elf64_Phdr *phdr;
+ int i;
+
+ /* only supports phdr that fits in one page */
+ if (ehdr->e_phnum >
+ (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
+ return -EINVAL;
+
+ phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
+
+ for (i = 0; i < ehdr->e_phnum; ++i)
+ if (phdr[i].p_type == PT_NOTE)
+ return stack_map_parse_build_id(page_addr, build_id,
+ page_addr + phdr[i].p_offset,
+ phdr[i].p_filesz);
+ return -EINVAL;
+}
+
+/* Parse build ID of ELF file mapped to vma */
+static int stack_map_get_build_id(struct vm_area_struct *vma,
+ unsigned char *build_id)
+{
+ Elf32_Ehdr *ehdr;
+ struct page *page;
+ void *page_addr;
+ int ret;
+
+ /* only works for page backed storage */
+ if (!vma->vm_file)
+ return -EINVAL;
+
+ page = find_get_page(vma->vm_file->f_mapping, 0);
+ if (!page)
+ return -EFAULT; /* page not mapped */
+
+ ret = -EINVAL;
+ page_addr = page_address(page);
+ ehdr = (Elf32_Ehdr *)page_addr;
+
+ /* compare the ELF magic: 0x7f "ELF" */
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
+ goto out;
+
+ /* only support executable file and shared object file */
+ if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
+ goto out;
+
+ if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
+ ret = stack_map_get_build_id_32(page_addr, build_id);
+ else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
+ ret = stack_map_get_build_id_64(page_addr, build_id);
+out:
+ put_page(page);
+ return ret;
+}
+
+static void stack_map_get_build_id_offset(struct bpf_map *map,
+ struct stack_map_bucket *bucket,
+ u64 *ips, u32 trace_nr, bool user)
+{
+ int i;
+ struct vm_area_struct *vma;
+ struct bpf_stack_build_id *id_offs;
+
+ bucket->nr = trace_nr;
+ id_offs = (struct bpf_stack_build_id *)bucket->data;
+
+ /*
+ * We cannot do up_read() in NMI context, so build_id lookup is
+ * only supported for non-NMI events. If, at some point, it becomes
+ * possible to run find_vma() without taking the semaphore, we would
+ * like to allow build_id lookups in NMI context as well.
+ *
+ * The same fallback is used for kernel stacks (!user) on a stackmap
+ * with build_id.
+ */
+ if (!user || !current || !current->mm || in_nmi() ||
+ down_read_trylock(&current->mm->mmap_sem) == 0) {
+ /* cannot access current->mm, fall back to ips */
+ for (i = 0; i < trace_nr; i++) {
+ id_offs[i].status = BPF_STACK_BUILD_ID_IP;
+ id_offs[i].ip = ips[i];
+ }
+ return;
+ }
+
+ for (i = 0; i < trace_nr; i++) {
+ vma = find_vma(current->mm, ips[i]);
+ if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) {
+ /* per entry fall back to ips */
+ id_offs[i].status = BPF_STACK_BUILD_ID_IP;
+ id_offs[i].ip = ips[i];
+ continue;
+ }
+ id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
+ - vma->vm_start;
+ id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
+ }
+ up_read(&current->mm->mmap_sem);
+}
+
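A minimal userspace sketch of consuming the bucket layout produced above, assuming libbpf's bpf_map_lookup_elem() and the struct bpf_stack_build_id layout from this series' UAPI change; map_fd, stackid and max_depth are illustrative parameters:

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static void print_build_id_stack(int map_fd, __u32 stackid,
				 unsigned int max_depth)
{
	/* value_size of the map must cover max_depth entries */
	struct bpf_stack_build_id frames[127];
	unsigned int i, j;

	if (bpf_map_lookup_elem(map_fd, &stackid, frames))
		return;

	for (i = 0; i < max_depth; i++) {
		if (frames[i].status == BPF_STACK_BUILD_ID_VALID) {
			for (j = 0; j < BPF_BUILD_ID_SIZE; j++)
				printf("%02x", frames[i].build_id[j]);
			printf(" +0x%llx\n",
			       (unsigned long long)frames[i].offset);
		} else if (frames[i].status == BPF_STACK_BUILD_ID_IP) {
			/* fallback: raw instruction pointer */
			printf("ip 0x%llx\n",
			       (unsigned long long)frames[i].ip);
		}
	}
}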
BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
u64, flags)
{
struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
struct perf_callchain_entry *trace;
struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
- u32 max_depth = map->value_size / 8;
+ u32 max_depth = map->value_size / stack_map_data_size(map);
/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
u32 init_nr = sysctl_perf_event_max_stack - max_depth;
u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
@@ -132,6 +321,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
bool user = flags & BPF_F_USER_STACK;
bool kernel = !user;
u64 *ips;
+ bool hash_matches;
if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
@@ -160,24 +350,43 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
id = hash & (smap->n_buckets - 1);
bucket = READ_ONCE(smap->buckets[id]);
- if (bucket && bucket->hash == hash) {
- if (flags & BPF_F_FAST_STACK_CMP)
+ hash_matches = bucket && bucket->hash == hash;
+ /* fast cmp */
+ if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
+ return id;
+
+ if (stack_map_use_build_id(map)) {
+ /* for build_id+offset, pop a bucket before slow cmp */
+ new_bucket = (struct stack_map_bucket *)
+ pcpu_freelist_pop(&smap->freelist);
+ if (unlikely(!new_bucket))
+ return -ENOMEM;
+ stack_map_get_build_id_offset(map, new_bucket, ips,
+ trace_nr, user);
+ trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
+ if (hash_matches && bucket->nr == trace_nr &&
+ memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
+ pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
return id;
- if (bucket->nr == trace_nr &&
- memcmp(bucket->ip, ips, trace_len) == 0)
+ }
+ if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
+ pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
+ return -EEXIST;
+ }
+ } else {
+ if (hash_matches && bucket->nr == trace_nr &&
+ memcmp(bucket->data, ips, trace_len) == 0)
return id;
+ if (bucket && !(flags & BPF_F_REUSE_STACKID))
+ return -EEXIST;
+
+ new_bucket = (struct stack_map_bucket *)
+ pcpu_freelist_pop(&smap->freelist);
+ if (unlikely(!new_bucket))
+ return -ENOMEM;
+ memcpy(new_bucket->data, ips, trace_len);
}
- /* this call stack is not in the map, try to add it */
- if (bucket && !(flags & BPF_F_REUSE_STACKID))
- return -EEXIST;
-
- new_bucket = (struct stack_map_bucket *)
- pcpu_freelist_pop(&smap->freelist);
- if (unlikely(!new_bucket))
- return -ENOMEM;
-
- memcpy(new_bucket->ip, ips, trace_len);
new_bucket->hash = hash;
new_bucket->nr = trace_nr;
@@ -216,8 +425,8 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
if (!bucket)
return -ENOENT;
- trace_len = bucket->nr * sizeof(u64);
- memcpy(value, bucket->ip, trace_len);
+ trace_len = bucket->nr * stack_map_data_size(map);
+ memcpy(value, bucket->data, trace_len);
memset(value + trace_len, 0, map->value_size - trace_len);
old_bucket = xchg(&smap->buckets[id], bucket);
@@ -226,9 +435,33 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
return 0;
}
-static int stack_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
+static int stack_map_get_next_key(struct bpf_map *map, void *key,
+ void *next_key)
{
- return -EINVAL;
+ struct bpf_stack_map *smap = container_of(map,
+ struct bpf_stack_map, map);
+ u32 id;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (!key) {
+ id = 0;
+ } else {
+ id = *(u32 *)key;
+ if (id >= smap->n_buckets || !smap->buckets[id])
+ id = 0;
+ else
+ id++;
+ }
+
+ while (id < smap->n_buckets && !smap->buckets[id])
+ id++;
+
+ if (id >= smap->n_buckets)
+ return -ENOENT;
+
+ *(u32 *)next_key = id;
+ return 0;
}
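With a real get_next_key, user space can now enumerate every allocated stack id rather than probing keys blindly. A sketch using the bpf_map_get_next_key() wrapper from tools/lib/bpf (header path assumed; a raw bpf(2) call works the same way):

#include <bpf/bpf.h>	/* tools/lib/bpf syscall wrappers; path may differ */

static void walk_stackmap(int map_fd)	/* hypothetical helper name */
{
	__u32 key, next_key;
	int err;

	/* a NULL current key returns the first allocated stack id */
	err = bpf_map_get_next_key(map_fd, NULL, &next_key);
	while (!err) {
		key = next_key;
		/* ... bpf_map_lookup_elem(map_fd, &key, values) ... */
		err = bpf_map_get_next_key(map_fd, &key, &next_key);
	}
	/* the walk ends with err < 0 and errno == ENOENT */
}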
static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5cb783fc8224..4ca46df19c9a 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -94,18 +94,34 @@ static int check_uarg_tail_zero(void __user *uaddr,
return 0;
}
+const struct bpf_map_ops bpf_map_offload_ops = {
+ .map_alloc = bpf_map_offload_map_alloc,
+ .map_free = bpf_map_offload_map_free,
+};
+
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
+ const struct bpf_map_ops *ops;
struct bpf_map *map;
+ int err;
- if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
- !bpf_map_types[attr->map_type])
+ if (attr->map_type >= ARRAY_SIZE(bpf_map_types))
+ return ERR_PTR(-EINVAL);
+ ops = bpf_map_types[attr->map_type];
+ if (!ops)
return ERR_PTR(-EINVAL);
- map = bpf_map_types[attr->map_type]->map_alloc(attr);
+ if (ops->map_alloc_check) {
+ err = ops->map_alloc_check(attr);
+ if (err)
+ return ERR_PTR(err);
+ }
+ if (attr->map_ifindex)
+ ops = &bpf_map_offload_ops;
+ map = ops->map_alloc(attr);
if (IS_ERR(map))
return map;
- map->ops = bpf_map_types[attr->map_type];
+ map->ops = ops;
map->map_type = attr->map_type;
return map;
}
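Map allocation is now split in two stages: an optional, allocation-free ->map_alloc_check() that only validates attributes, and ->map_alloc() proper, which find_and_alloc_map() silently swaps for bpf_map_offload_ops when map_ifindex is set. A kernel-side sketch of how a hypothetical map type (all names invented for illustration) would wire this up:

static int example_map_alloc_check(union bpf_attr *attr)
{
	/* attribute sanity only - must not allocate or charge anything */
	if (attr->key_size != 4 || attr->value_size == 0 ||
	    attr->max_entries == 0 || attr->map_flags)
		return -EINVAL;
	return 0;
}

static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map = kzalloc(sizeof(*map), GFP_USER);

	if (!map)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(map, attr);	/* helper added in the next hunk */
	return map;
}

static void example_map_free(struct bpf_map *map)
{
	kfree(map);
}

const struct bpf_map_ops example_map_ops = {
	.map_alloc_check = example_map_alloc_check,	/* still runs for offloaded maps */
	.map_alloc	 = example_map_alloc,		/* replaced by bpf_map_offload_ops if map_ifindex is set */
	.map_free	 = example_map_free,
};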
@@ -134,6 +150,16 @@ void bpf_map_area_free(void *area)
kvfree(area);
}
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
+{
+ map->map_type = attr->map_type;
+ map->key_size = attr->key_size;
+ map->value_size = attr->value_size;
+ map->max_entries = attr->max_entries;
+ map->map_flags = attr->map_flags;
+ map->numa_node = bpf_map_attr_numa_node(attr);
+}
+
int bpf_map_precharge_memlock(u32 pages)
{
struct user_struct *user = get_current_user();
@@ -177,11 +203,13 @@ static int bpf_map_alloc_id(struct bpf_map *map)
{
int id;
+ idr_preload(GFP_KERNEL);
spin_lock_bh(&map_idr_lock);
id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
if (id > 0)
map->id = id;
spin_unlock_bh(&map_idr_lock);
+ idr_preload_end();
if (WARN_ON_ONCE(!id))
return -ENOSPC;

@@ -189,16 +217,25 @@ static int bpf_map_alloc_id(struct bpf_map *map)
return id > 0 ? 0 : id;
}
-static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
unsigned long flags;
+ /* Offloaded maps are removed from the IDR store when their device
+ * disappears - even if someone holds an fd to them they are unusable,
+ * the memory is gone, all ops will fail; they are simply waiting for
+ * refcnt to drop to be freed.
+ */
+ if (!map->id)
+ return;
+
if (do_idr_lock)
spin_lock_irqsave(&map_idr_lock, flags);
else
__acquire(&map_idr_lock);
idr_remove(&map_idr, map->id);
+ map->id = 0;
if (do_idr_lock)
spin_unlock_irqrestore(&map_idr_lock, flags);
@@ -378,7 +415,7 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
return 0;
}
-#define BPF_MAP_CREATE_LAST_FIELD map_name
+#define BPF_MAP_CREATE_LAST_FIELD map_ifindex
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
@@ -566,8 +603,10 @@ static int map_lookup_elem(union bpf_attr *attr)
if (!value)
goto free_key;
- if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
- map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+ if (bpf_map_is_dev_bound(map)) {
+ err = bpf_map_offload_lookup_elem(map, key, value);
+ } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
err = bpf_percpu_hash_copy(map, key, value);
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
err = bpf_percpu_array_copy(map, key, value);
@@ -654,7 +693,10 @@ static int map_update_elem(union bpf_attr *attr)
goto free_value;
/* Need to create a kthread, thus must support schedule */
- if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
+ if (bpf_map_is_dev_bound(map)) {
+ err = bpf_map_offload_update_elem(map, key, value, attr->flags);
+ goto out;
+ } else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
err = map->ops->map_update_elem(map, key, value, attr->flags);
goto out;
}
@@ -669,10 +711,7 @@ static int map_update_elem(union bpf_attr *attr)
err = bpf_percpu_hash_update(map, key, value, attr->flags);
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
err = bpf_percpu_array_update(map, key, value, attr->flags);
- } else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
- map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
- map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
- map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
+ } else if (IS_FD_ARRAY(map)) {
rcu_read_lock();
err = bpf_fd_array_map_update_elem(map, f.file, key, value,
attr->flags);
@@ -731,6 +770,11 @@ static int map_delete_elem(union bpf_attr *attr)
goto err_put;
}
+ if (bpf_map_is_dev_bound(map)) {
+ err = bpf_map_offload_delete_elem(map, key);
+ goto out;
+ }
+
preempt_disable();
__this_cpu_inc(bpf_prog_active);
rcu_read_lock();
@@ -738,7 +782,7 @@ static int map_delete_elem(union bpf_attr *attr)
rcu_read_unlock();
__this_cpu_dec(bpf_prog_active);
preempt_enable();
-
+out:
if (!err)
trace_bpf_map_delete_elem(map, ufd, key);
kfree(key);
@@ -788,9 +832,15 @@ static int map_get_next_key(union bpf_attr *attr)
if (!next_key)
goto free_key;
+ if (bpf_map_is_dev_bound(map)) {
+ err = bpf_map_offload_get_next_key(map, key, next_key);
+ goto out;
+ }
+
rcu_read_lock();
err = map->ops->map_get_next_key(map, key, next_key);
rcu_read_unlock();
+out:
if (err)
goto free_next_key;
@@ -892,11 +942,13 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
int id;
+ idr_preload(GFP_KERNEL);
spin_lock_bh(&prog_idr_lock);
id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
if (id > 0)
prog->aux->id = id;
spin_unlock_bh(&prog_idr_lock);
+ idr_preload_end();
/* id is in [1, INT_MAX) */
if (WARN_ON_ONCE(!id))
@@ -905,9 +957,13 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog)
return id > 0 ? 0 : id;
}
-static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
+void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
- /* cBPF to eBPF migrations are currently not in the idr store. */
+ /* cBPF to eBPF migrations are currently not in the idr store.
+ * Offloaded programs are removed from the store when their device
+ * disappears - even if someone grabs an fd to them they are unusable,
+ * simply waiting for refcnt to drop to be freed.
+ */
if (!prog->aux->id)
return;
@@ -917,6 +973,7 @@ static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
__acquire(&prog_idr_lock);
idr_remove(&prog_idr, prog->aux->id);
+ prog->aux->id = 0;
if (do_idr_lock)
spin_unlock_bh(&prog_idr_lock);
@@ -937,10 +994,16 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
if (atomic_dec_and_test(&prog->aux->refcnt)) {
+ int i;
+
trace_bpf_prog_put_rcu(prog);
/* bpf_prog_free_id() must be called first */
bpf_prog_free_id(prog, do_idr_lock);
+
+ for (i = 0; i < prog->aux->func_cnt; i++)
+ bpf_prog_kallsyms_del(prog->aux->func[i]);
bpf_prog_kallsyms_del(prog);
+
call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
}
@@ -1108,8 +1171,63 @@ struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
+/* Initially all BPF programs could be loaded w/o specifying
+ * expected_attach_type. Later for some of them specifying expected_attach_type
+ * at load time became required so that the program could be validated properly.
+ * Programs of types that are allowed to be loaded both w/ and w/o (for
+ * backward compatibility) expected_attach_type should have the default attach
+ * type assigned to expected_attach_type for the latter case, so that it can be
+ * validated later at attach time.
+ *
+ * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
+ * prog type requires it but has some attach types that have to be backward
+ * compatible.
+ */
+static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
+{
+ switch (attr->prog_type) {
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
+ * exist so checking for non-zero is the way to go here.
+ */
+ if (!attr->expected_attach_type)
+ attr->expected_attach_type =
+ BPF_CGROUP_INET_SOCK_CREATE;
+ break;
+ }
+}
+
+static int
+bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
+ enum bpf_attach_type expected_attach_type)
+{
+ switch (prog_type) {
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ switch (expected_attach_type) {
+ case BPF_CGROUP_INET_SOCK_CREATE:
+ case BPF_CGROUP_INET4_POST_BIND:
+ case BPF_CGROUP_INET6_POST_BIND:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ switch (expected_attach_type) {
+ case BPF_CGROUP_INET4_BIND:
+ case BPF_CGROUP_INET6_BIND:
+ case BPF_CGROUP_INET4_CONNECT:
+ case BPF_CGROUP_INET6_CONNECT:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return 0;
+ }
+}
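From user space, expected_attach_type is just one more bpf_attr field of BPF_PROG_LOAD; for the new bind/connect hooks it has to equal the attach type later passed to BPF_PROG_ATTACH, or the load (or attach) is rejected. An illustrative sketch (function name invented; <linux/bpf.h>, <string.h>, <sys/syscall.h> and <unistd.h> assumed as in the earlier stackmap sketch):

static int load_bind4_prog(const struct bpf_insn *insns, __u32 insn_cnt)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type		  = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
	attr.expected_attach_type = BPF_CGROUP_INET4_BIND;	/* must match the later attach type */
	attr.insns		  = (__u64)(unsigned long)insns;
	attr.insn_cnt		  = insn_cnt;
	attr.license		  = (__u64)(unsigned long)"GPL";

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}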
+
/* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD prog_ifindex
+#define BPF_PROG_LOAD_LAST_FIELD expected_attach_type
static int bpf_prog_load(union bpf_attr *attr)
{
@@ -1146,11 +1264,19 @@ static int bpf_prog_load(union bpf_attr *attr)
!capable(CAP_SYS_ADMIN))
return -EPERM;
+ bpf_prog_load_fixup_attach_type(attr);
+ if (bpf_prog_load_check_attach_type(type, attr->expected_attach_type))
+ return -EINVAL;
+
/* plain bpf_prog allocation */
prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
if (!prog)
return -ENOMEM;
+ prog->expected_attach_type = attr->expected_attach_type;
+
+ prog->aux->offload_requested = !!attr->prog_ifindex;
+
err = security_bpf_prog_alloc(prog->aux);
if (err)
goto free_prog_nouncharge;
@@ -1172,7 +1298,7 @@ static int bpf_prog_load(union bpf_attr *attr)
atomic_set(&prog->aux->refcnt, 1);
prog->gpl_compatible = is_gpl ? 1 : 0;
- if (attr->prog_ifindex) {
+ if (bpf_prog_is_dev_bound(prog->aux)) {
err = bpf_prog_offload_init(prog, attr);
if (err)
goto free_prog;
@@ -1194,7 +1320,8 @@ static int bpf_prog_load(union bpf_attr *attr)
goto free_used_maps;
/* eBPF program is ready to be JITed */
- prog = bpf_prog_select_runtime(prog, &err);
+ if (!prog->bpf_func)
+ prog = bpf_prog_select_runtime(prog, &err);
if (err < 0)
goto free_used_maps;
@@ -1249,11 +1376,99 @@ static int bpf_obj_get(const union bpf_attr *attr)
attr->file_flags);
}
+struct bpf_raw_tracepoint {
+ struct bpf_raw_event_map *btp;
+ struct bpf_prog *prog;
+};
+
+static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
+{
+ struct bpf_raw_tracepoint *raw_tp = filp->private_data;
+
+ if (raw_tp->prog) {
+ bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
+ bpf_prog_put(raw_tp->prog);
+ }
+ kfree(raw_tp);
+ return 0;
+}
+
+static const struct file_operations bpf_raw_tp_fops = {
+ .release = bpf_raw_tracepoint_release,
+ .read = bpf_dummy_read,
+ .write = bpf_dummy_write,
+};
+
+#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
+
+static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
+{
+ struct bpf_raw_tracepoint *raw_tp;
+ struct bpf_raw_event_map *btp;
+ struct bpf_prog *prog;
+ char tp_name[128];
+ int tp_fd, err;
+
+ if (strncpy_from_user(tp_name, u64_to_user_ptr(attr->raw_tracepoint.name),
+ sizeof(tp_name) - 1) < 0)
+ return -EFAULT;
+ tp_name[sizeof(tp_name) - 1] = 0;
+
+ btp = bpf_find_raw_tracepoint(tp_name);
+ if (!btp)
+ return -ENOENT;
+
+ raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
+ if (!raw_tp)
+ return -ENOMEM;
+ raw_tp->btp = btp;
+
+ prog = bpf_prog_get_type(attr->raw_tracepoint.prog_fd,
+ BPF_PROG_TYPE_RAW_TRACEPOINT);
+ if (IS_ERR(prog)) {
+ err = PTR_ERR(prog);
+ goto out_free_tp;
+ }
+
+ err = bpf_probe_register(raw_tp->btp, prog);
+ if (err)
+ goto out_put_prog;
+
+ raw_tp->prog = prog;
+ tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp,
+ O_CLOEXEC);
+ if (tp_fd < 0) {
+ bpf_probe_unregister(raw_tp->btp, prog);
+ err = tp_fd;
+ goto out_put_prog;
+ }
+ return tp_fd;
+
+out_put_prog:
+ bpf_prog_put(prog);
+out_free_tp:
+ kfree(raw_tp);
+ return err;
+}
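The command takes only a tracepoint name and the fd of a BPF_PROG_TYPE_RAW_TRACEPOINT program, and returns an anon-inode fd that keeps the program attached until it is closed. An illustrative user-space sketch (function name invented; headers as in the earlier sketches):

static int attach_raw_tp(int prog_fd, const char *name)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name    = (__u64)(unsigned long)name;	/* e.g. "sched_switch" */
	attr.raw_tracepoint.prog_fd = prog_fd;

	/* closing the returned fd unregisters the probe and drops the prog ref */
	return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}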
+
#ifdef CONFIG_CGROUP_BPF
+static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
+ enum bpf_attach_type attach_type)
+{
+ switch (prog->type) {
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
+ default:
+ return 0;
+ }
+}
+
#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
-static int sockmap_get_from_fd(const union bpf_attr *attr, bool attach)
+static int sockmap_get_from_fd(const union bpf_attr *attr,
+ int type, bool attach)
{
struct bpf_prog *prog = NULL;
int ufd = attr->target_fd;
@@ -1267,8 +1482,7 @@ static int sockmap_get_from_fd(const union bpf_attr *attr, bool attach)
return PTR_ERR(map);
if (attach) {
- prog = bpf_prog_get_type(attr->attach_bpf_fd,
- BPF_PROG_TYPE_SK_SKB);
+ prog = bpf_prog_get_type(attr->attach_bpf_fd, type);
if (IS_ERR(prog)) {
fdput(f);
return PTR_ERR(prog);
@@ -1312,17 +1526,27 @@ static int bpf_prog_attach(const union bpf_attr *attr)
ptype = BPF_PROG_TYPE_CGROUP_SKB;
break;
case BPF_CGROUP_INET_SOCK_CREATE:
+ case BPF_CGROUP_INET4_POST_BIND:
+ case BPF_CGROUP_INET6_POST_BIND:
ptype = BPF_PROG_TYPE_CGROUP_SOCK;
break;
+ case BPF_CGROUP_INET4_BIND:
+ case BPF_CGROUP_INET6_BIND:
+ case BPF_CGROUP_INET4_CONNECT:
+ case BPF_CGROUP_INET6_CONNECT:
+ ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
+ break;
case BPF_CGROUP_SOCK_OPS:
ptype = BPF_PROG_TYPE_SOCK_OPS;
break;
case BPF_CGROUP_DEVICE:
ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
break;
+ case BPF_SK_MSG_VERDICT:
+ return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true);
case BPF_SK_SKB_STREAM_PARSER:
case BPF_SK_SKB_STREAM_VERDICT:
- return sockmap_get_from_fd(attr, true);
+ return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true);
default:
return -EINVAL;
}
@@ -1331,6 +1555,11 @@ static int bpf_prog_attach(const union bpf_attr *attr)
if (IS_ERR(prog))
return PTR_ERR(prog);
+ if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
+ bpf_prog_put(prog);
+ return -EINVAL;
+ }
+
cgrp = cgroup_get_from_fd(attr->target_fd);
if (IS_ERR(cgrp)) {
bpf_prog_put(prog);
@@ -1367,17 +1596,27 @@ static int bpf_prog_detach(const union bpf_attr *attr)
ptype = BPF_PROG_TYPE_CGROUP_SKB;
break;
case BPF_CGROUP_INET_SOCK_CREATE:
+ case BPF_CGROUP_INET4_POST_BIND:
+ case BPF_CGROUP_INET6_POST_BIND:
ptype = BPF_PROG_TYPE_CGROUP_SOCK;
break;
+ case BPF_CGROUP_INET4_BIND:
+ case BPF_CGROUP_INET6_BIND:
+ case BPF_CGROUP_INET4_CONNECT:
+ case BPF_CGROUP_INET6_CONNECT:
+ ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
+ break;
case BPF_CGROUP_SOCK_OPS:
ptype = BPF_PROG_TYPE_SOCK_OPS;
break;
case BPF_CGROUP_DEVICE:
ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
break;
+ case BPF_SK_MSG_VERDICT:
+ return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false);
case BPF_SK_SKB_STREAM_PARSER:
case BPF_SK_SKB_STREAM_VERDICT:
- return sockmap_get_from_fd(attr, false);
+ return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false);
default:
return -EINVAL;
}
@@ -1416,6 +1655,12 @@ static int bpf_prog_query(const union bpf_attr *attr,
case BPF_CGROUP_INET_INGRESS:
case BPF_CGROUP_INET_EGRESS:
case BPF_CGROUP_INET_SOCK_CREATE:
+ case BPF_CGROUP_INET4_BIND:
+ case BPF_CGROUP_INET6_BIND:
+ case BPF_CGROUP_INET4_POST_BIND:
+ case BPF_CGROUP_INET6_POST_BIND:
+ case BPF_CGROUP_INET4_CONNECT:
+ case BPF_CGROUP_INET6_CONNECT:
case BPF_CGROUP_SOCK_OPS:
case BPF_CGROUP_DEVICE:
break;
@@ -1439,6 +1684,8 @@ static int bpf_prog_test_run(const union bpf_attr *attr,
struct bpf_prog *prog;
int ret = -ENOTSUPP;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
if (CHECK_ATTR(BPF_PROG_TEST_RUN))
return -EINVAL;
@@ -1551,6 +1798,67 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
return fd;
}
+static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
+ unsigned long addr)
+{
+ int i;
+
+ for (i = 0; i < prog->aux->used_map_cnt; i++)
+ if (prog->aux->used_maps[i] == (void *)addr)
+ return prog->aux->used_maps[i];
+ return NULL;
+}
+
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+{
+ const struct bpf_map *map;
+ struct bpf_insn *insns;
+ u64 imm;
+ int i;
+
+ insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
+ GFP_USER);
+ if (!insns)
+ return insns;
+
+ for (i = 0; i < prog->len; i++) {
+ if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
+ insns[i].code = BPF_JMP | BPF_CALL;
+ insns[i].imm = BPF_FUNC_tail_call;
+ /* fall-through */
+ }
+ if (insns[i].code == (BPF_JMP | BPF_CALL) ||
+ insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
+ if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
+ insns[i].code = BPF_JMP | BPF_CALL;
+ if (!bpf_dump_raw_ok())
+ insns[i].imm = 0;
+ continue;
+ }
+
+ if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
+ continue;
+
+ imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
+ map = bpf_map_from_imm(prog, imm);
+ if (map) {
+ insns[i].src_reg = BPF_PSEUDO_MAP_FD;
+ insns[i].imm = map->id;
+ insns[i + 1].imm = 0;
+ continue;
+ }
+
+ if (!bpf_dump_raw_ok() &&
+ imm == (unsigned long)prog->aux) {
+ insns[i].imm = 0;
+ insns[i + 1].imm = 0;
+ continue;
+ }
+ }
+
+ return insns;
+}
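User space observes bpf_insn_prepare_dump() through BPF_OBJ_GET_INFO_BY_FD: LD_IMM64 map pointers come back as BPF_PSEUDO_MAP_FD loads carrying the map id, tail calls show up as calls to BPF_FUNC_tail_call, and other call immediates are zeroed unless bpf_dump_raw_ok(). A retrieval sketch (function name invented; headers as in the earlier sketches):

static int dump_xlated(int prog_fd, struct bpf_insn *buf, __u32 buf_len)
{
	struct bpf_prog_info info;
	union bpf_attr attr;

	memset(&info, 0, sizeof(info));
	info.xlated_prog_len   = buf_len;
	info.xlated_prog_insns = (__u64)(unsigned long)buf;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd   = prog_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info     = (__u64)(unsigned long)&info;

	/* info.xlated_prog_len is updated to the full size; only buf_len bytes are copied */
	return syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
}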
+
static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
@@ -1598,24 +1906,51 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
goto done;
}
- ulen = info.jited_prog_len;
- info.jited_prog_len = prog->jited_len;
- if (info.jited_prog_len && ulen) {
- uinsns = u64_to_user_ptr(info.jited_prog_insns);
- ulen = min_t(u32, info.jited_prog_len, ulen);
- if (copy_to_user(uinsns, prog->bpf_func, ulen))
- return -EFAULT;
- }
-
ulen = info.xlated_prog_len;
info.xlated_prog_len = bpf_prog_insn_size(prog);
if (info.xlated_prog_len && ulen) {
+ struct bpf_insn *insns_sanitized;
+ bool fault;
+
+ if (prog->blinded && !bpf_dump_raw_ok()) {
+ info.xlated_prog_insns = 0;
+ goto done;
+ }
+ insns_sanitized = bpf_insn_prepare_dump(prog);
+ if (!insns_sanitized)
+ return -ENOMEM;
uinsns = u64_to_user_ptr(info.xlated_prog_insns);
ulen = min_t(u32, info.xlated_prog_len, ulen);
- if (copy_to_user(uinsns, prog->insnsi, ulen))
+ fault = copy_to_user(uinsns, insns_sanitized, ulen);
+ kfree(insns_sanitized);
+ if (fault)
return -EFAULT;
}
+ if (bpf_prog_is_dev_bound(prog->aux)) {
+ err = bpf_prog_offload_info_fill(&info, prog);
+ if (err)
+ return err;
+ goto done;
+ }
+
+ /* NOTE: the following code is supposed to be skipped for offload.
+ * bpf_prog_offload_info_fill() is the place to fill similar fields
+ * for offload.
+ */
+ ulen = info.jited_prog_len;
+ info.jited_prog_len = prog->jited_len;
+ if (info.jited_prog_len && ulen) {
+ if (bpf_dump_raw_ok()) {
+ uinsns = u64_to_user_ptr(info.jited_prog_insns);
+ ulen = min_t(u32, info.jited_prog_len, ulen);
+ if (copy_to_user(uinsns, prog->bpf_func, ulen))
+ return -EFAULT;
+ } else {
+ info.jited_prog_insns = 0;
+ }
+ }
+
done:
if (copy_to_user(uinfo, &info, info_len) ||
put_user(info_len, &uattr->info.info_len))
@@ -1646,6 +1981,12 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map,
info.map_flags = map->map_flags;
memcpy(info.name, map->name, sizeof(map->name));
+ if (bpf_map_is_dev_bound(map)) {
+ err = bpf_map_offload_info_fill(&info, map);
+ if (err)
+ return err;
+ }
+
if (copy_to_user(uinfo, &info, info_len) ||
put_user(info_len, &uattr->info.info_len))
return -EFAULT;
@@ -1687,7 +2028,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
union bpf_attr attr = {};
int err;
- if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
+ if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
return -EPERM;
err = check_uarg_tail_zero(uattr, sizeof(attr), size);
@@ -1759,6 +2100,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
case BPF_OBJ_GET_INFO_BY_FD:
err = bpf_obj_get_info_by_fd(&attr, uattr);
break;
+ case BPF_RAW_TRACEPOINT_OPEN:
+ err = bpf_raw_tracepoint_open(&attr);
+ break;
default:
err = -EINVAL;
break;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 13551e623501..5dd1dcb902bf 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -20,6 +20,8 @@
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
+#include <linux/bsearch.h>
+#include <linux/sort.h>
#include "disasm.h"
@@ -166,23 +168,12 @@ struct bpf_call_arg_meta {
static DEFINE_MUTEX(bpf_verifier_lock);
-/* log_level controls verbosity level of eBPF verifier.
- * verbose() is used to dump the verification trace to the log, so the user
- * can figure out what's wrong with the program
- */
-static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
- const char *fmt, ...)
+void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
+ va_list args)
{
- struct bpf_verifer_log *log = &env->log;
unsigned int n;
- va_list args;
-
- if (!log->level || !log->ubuf || bpf_verifier_log_full(log))
- return;
- va_start(args, fmt);
n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
- va_end(args);
WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
"verifier log line truncated - local buffer too short\n");
@@ -196,6 +187,37 @@ static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
log->ubuf = NULL;
}
+/* log_level controls verbosity level of eBPF verifier.
+ * bpf_verifier_log_write() is used to dump the verification trace to the log,
+ * so the user can figure out what's wrong with the program
+ */
+__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
+ const char *fmt, ...)
+{
+ va_list args;
+
+ if (!bpf_verifier_log_needed(&env->log))
+ return;
+
+ va_start(args, fmt);
+ bpf_verifier_vlog(&env->log, fmt, args);
+ va_end(args);
+}
+EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
+
+__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
+{
+ struct bpf_verifier_env *env = private_data;
+ va_list args;
+
+ if (!bpf_verifier_log_needed(&env->log))
+ return;
+
+ va_start(args, fmt);
+ bpf_verifier_vlog(&env->log, fmt, args);
+ va_end(args);
+}
+
static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
return type == PTR_TO_PACKET ||
@@ -216,23 +238,48 @@ static const char * const reg_type_str[] = {
[PTR_TO_PACKET_END] = "pkt_end",
};
+static void print_liveness(struct bpf_verifier_env *env,
+ enum bpf_reg_liveness live)
+{
+ if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
+ verbose(env, "_");
+ if (live & REG_LIVE_READ)
+ verbose(env, "r");
+ if (live & REG_LIVE_WRITTEN)
+ verbose(env, "w");
+}
+
+static struct bpf_func_state *func(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg)
+{
+ struct bpf_verifier_state *cur = env->cur_state;
+
+ return cur->frame[reg->frameno];
+}
+
static void print_verifier_state(struct bpf_verifier_env *env,
- struct bpf_verifier_state *state)
+ const struct bpf_func_state *state)
{
- struct bpf_reg_state *reg;
+ const struct bpf_reg_state *reg;
enum bpf_reg_type t;
int i;
+ if (state->frameno)
+ verbose(env, " frame%d:", state->frameno);
for (i = 0; i < MAX_BPF_REG; i++) {
reg = &state->regs[i];
t = reg->type;
if (t == NOT_INIT)
continue;
- verbose(env, " R%d=%s", i, reg_type_str[t]);
+ verbose(env, " R%d", i);
+ print_liveness(env, reg->live);
+ verbose(env, "=%s", reg_type_str[t]);
if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
tnum_is_const(reg->var_off)) {
/* reg->off should be 0 for SCALAR_VALUE */
verbose(env, "%lld", reg->var_off.value + reg->off);
+ if (t == PTR_TO_STACK)
+ verbose(env, ",call_%d", func(env, reg)->callsite);
} else {
verbose(env, "(id=%d", reg->id);
if (t != SCALAR_VALUE)
@@ -277,16 +324,21 @@ static void print_verifier_state(struct bpf_verifier_env *env,
}
}
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
- if (state->stack[i].slot_type[0] == STACK_SPILL)
- verbose(env, " fp%d=%s",
- -MAX_BPF_STACK + i * BPF_REG_SIZE,
+ if (state->stack[i].slot_type[0] == STACK_SPILL) {
+ verbose(env, " fp%d",
+ (-i - 1) * BPF_REG_SIZE);
+ print_liveness(env, state->stack[i].spilled_ptr.live);
+ verbose(env, "=%s",
reg_type_str[state->stack[i].spilled_ptr.type]);
+ }
+ if (state->stack[i].slot_type[0] == STACK_ZERO)
+ verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE);
}
verbose(env, "\n");
}
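With the liveness and frame annotations, a state line in the verifier log now looks roughly like the illustrative (not captured from a real run) line below: registers carry an optional _r/_w/_rw liveness suffix, PTR_TO_STACK registers print a ,call_N callsite, spilled slots print their pointer type, and zero-initialized slots print as fp-N=0:

 frame1: R1=ctx(id=0,off=0,imm=0) R6=inv42 R10=fp0,call_8 fp-8_r=map_value fp-16=0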
-static int copy_stack_state(struct bpf_verifier_state *dst,
- const struct bpf_verifier_state *src)
+static int copy_stack_state(struct bpf_func_state *dst,
+ const struct bpf_func_state *src)
{
if (!src->stack)
return 0;
@@ -302,13 +354,13 @@ static int copy_stack_state(struct bpf_verifier_state *dst,
/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
* make it consume minimal amount of memory. check_stack_write() access from
- * the program calls into realloc_verifier_state() to grow the stack size.
+ * the program calls into realloc_func_state() to grow the stack size.
* Note there is a non-zero 'parent' pointer inside bpf_verifier_state
* which this function copies over. It points to previous bpf_verifier_state
* which is never reallocated
*/
-static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
- bool copy_old)
+static int realloc_func_state(struct bpf_func_state *state, int size,
+ bool copy_old)
{
u32 old_size = state->allocated_stack;
struct bpf_stack_state *new_stack;
@@ -341,10 +393,23 @@ static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
return 0;
}
+static void free_func_state(struct bpf_func_state *state)
+{
+ if (!state)
+ return;
+ kfree(state->stack);
+ kfree(state);
+}
+
static void free_verifier_state(struct bpf_verifier_state *state,
bool free_self)
{
- kfree(state->stack);
+ int i;
+
+ for (i = 0; i <= state->curframe; i++) {
+ free_func_state(state->frame[i]);
+ state->frame[i] = NULL;
+ }
if (free_self)
kfree(state);
}
@@ -352,18 +417,46 @@ static void free_verifier_state(struct bpf_verifier_state *state,
/* copy verifier state from src to dst growing dst stack space
* when necessary to accommodate larger src stack
*/
-static int copy_verifier_state(struct bpf_verifier_state *dst,
- const struct bpf_verifier_state *src)
+static int copy_func_state(struct bpf_func_state *dst,
+ const struct bpf_func_state *src)
{
int err;
- err = realloc_verifier_state(dst, src->allocated_stack, false);
+ err = realloc_func_state(dst, src->allocated_stack, false);
if (err)
return err;
- memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack));
+ memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack));
return copy_stack_state(dst, src);
}
+static int copy_verifier_state(struct bpf_verifier_state *dst_state,
+ const struct bpf_verifier_state *src)
+{
+ struct bpf_func_state *dst;
+ int i, err;
+
+ /* if dst has more stack frames then src frame, free them */
+ for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
+ free_func_state(dst_state->frame[i]);
+ dst_state->frame[i] = NULL;
+ }
+ dst_state->curframe = src->curframe;
+ dst_state->parent = src->parent;
+ for (i = 0; i <= src->curframe; i++) {
+ dst = dst_state->frame[i];
+ if (!dst) {
+ dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+ if (!dst)
+ return -ENOMEM;
+ dst_state->frame[i] = dst;
+ }
+ err = copy_func_state(dst, src->frame[i]);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
int *insn_idx)
{
@@ -416,6 +509,8 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
}
return &elem->st;
err:
+ free_verifier_state(env->cur_state, true);
+ env->cur_state = NULL;
/* pop all elements and return */
while (!pop_stack(env, NULL, NULL));
return NULL;
@@ -449,6 +544,13 @@ static void __mark_reg_known_zero(struct bpf_reg_state *reg)
__mark_reg_known(reg, 0);
}
+static void __mark_reg_const_zero(struct bpf_reg_state *reg)
+{
+ __mark_reg_known(reg, 0);
+ reg->off = 0;
+ reg->type = SCALAR_VALUE;
+}
+
static void mark_reg_known_zero(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
@@ -560,6 +662,7 @@ static void __mark_reg_unknown(struct bpf_reg_state *reg)
reg->id = 0;
reg->off = 0;
reg->var_off = tnum_unknown;
+ reg->frameno = 0;
__mark_reg_unbounded(reg);
}
@@ -568,8 +671,8 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
- /* Something bad happened, let's kill all regs */
- for (regno = 0; regno < MAX_BPF_REG; regno++)
+ /* Something bad happened, let's kill all regs except FP */
+ for (regno = 0; regno < BPF_REG_FP; regno++)
__mark_reg_not_init(regs + regno);
return;
}
@@ -587,8 +690,8 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
- /* Something bad happened, let's kill all regs */
- for (regno = 0; regno < MAX_BPF_REG; regno++)
+ /* Something bad happened, let's kill all regs except FP */
+ for (regno = 0; regno < BPF_REG_FP; regno++)
__mark_reg_not_init(regs + regno);
return;
}
@@ -596,8 +699,9 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
}
static void init_reg_state(struct bpf_verifier_env *env,
- struct bpf_reg_state *regs)
+ struct bpf_func_state *state)
{
+ struct bpf_reg_state *regs = state->regs;
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
@@ -608,41 +712,218 @@ static void init_reg_state(struct bpf_verifier_env *env,
/* frame pointer */
regs[BPF_REG_FP].type = PTR_TO_STACK;
mark_reg_known_zero(env, regs, BPF_REG_FP);
+ regs[BPF_REG_FP].frameno = state->frameno;
/* 1st arg to a function */
regs[BPF_REG_1].type = PTR_TO_CTX;
mark_reg_known_zero(env, regs, BPF_REG_1);
}
+#define BPF_MAIN_FUNC (-1)
+static void init_func_state(struct bpf_verifier_env *env,
+ struct bpf_func_state *state,
+ int callsite, int frameno, int subprogno)
+{
+ state->callsite = callsite;
+ state->frameno = frameno;
+ state->subprogno = subprogno;
+ init_reg_state(env, state);
+}
+
enum reg_arg_type {
SRC_OP, /* register is used as source operand */
DST_OP, /* register is used as destination operand */
DST_OP_NO_MARK /* same as above, check only, don't mark */
};
-static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
+static int cmp_subprogs(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+static int find_subprog(struct bpf_verifier_env *env, int off)
+{
+ u32 *p;
+
+ p = bsearch(&off, env->subprog_starts, env->subprog_cnt,
+ sizeof(env->subprog_starts[0]), cmp_subprogs);
+ if (!p)
+ return -ENOENT;
+ return p - env->subprog_starts;
+
+}
+
+static int add_subprog(struct bpf_verifier_env *env, int off)
+{
+ int insn_cnt = env->prog->len;
+ int ret;
+
+ if (off >= insn_cnt || off < 0) {
+ verbose(env, "call to invalid destination\n");
+ return -EINVAL;
+ }
+ ret = find_subprog(env, off);
+ if (ret >= 0)
+ return 0;
+ if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
+ verbose(env, "too many subprograms\n");
+ return -E2BIG;
+ }
+ env->subprog_starts[env->subprog_cnt++] = off;
+ sort(env->subprog_starts, env->subprog_cnt,
+ sizeof(env->subprog_starts[0]), cmp_subprogs, NULL);
+ return 0;
+}
+
+static int check_subprogs(struct bpf_verifier_env *env)
+{
+ int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
+ struct bpf_insn *insn = env->prog->insnsi;
+ int insn_cnt = env->prog->len;
+
+ /* determine subprog starts. The end is one before the next starts */
+ for (i = 0; i < insn_cnt; i++) {
+ if (insn[i].code != (BPF_JMP | BPF_CALL))
+ continue;
+ if (insn[i].src_reg != BPF_PSEUDO_CALL)
+ continue;
+ if (!env->allow_ptr_leaks) {
+ verbose(env, "function calls to other bpf functions are allowed for root only\n");
+ return -EPERM;
+ }
+ if (bpf_prog_is_dev_bound(env->prog->aux)) {
+ verbose(env, "function calls in offloaded programs are not supported yet\n");
+ return -EINVAL;
+ }
+ ret = add_subprog(env, i + insn[i].imm + 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (env->log.level > 1)
+ for (i = 0; i < env->subprog_cnt; i++)
+ verbose(env, "func#%d @%d\n", i, env->subprog_starts[i]);
+
+ /* now check that all jumps are within the same subprog */
+ subprog_start = 0;
+ if (env->subprog_cnt == cur_subprog)
+ subprog_end = insn_cnt;
+ else
+ subprog_end = env->subprog_starts[cur_subprog++];
+ for (i = 0; i < insn_cnt; i++) {
+ u8 code = insn[i].code;
+
+ if (BPF_CLASS(code) != BPF_JMP)
+ goto next;
+ if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
+ goto next;
+ off = i + insn[i].off + 1;
+ if (off < subprog_start || off >= subprog_end) {
+ verbose(env, "jump out of range from insn %d to %d\n", i, off);
+ return -EINVAL;
+ }
+next:
+ if (i == subprog_end - 1) {
+ /* to avoid fall-through from one subprog into another
+ * the last insn of the subprog should be either exit
+ * or unconditional jump back
+ */
+ if (code != (BPF_JMP | BPF_EXIT) &&
+ code != (BPF_JMP | BPF_JA)) {
+ verbose(env, "last insn is not an exit or jmp\n");
+ return -EINVAL;
+ }
+ subprog_start = subprog_end;
+ if (env->subprog_cnt == cur_subprog)
+ subprog_end = insn_cnt;
+ else
+ subprog_end = env->subprog_starts[cur_subprog++];
+ }
+ }
+ return 0;
+}
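For reference, a pseudo call is a BPF_JMP|BPF_CALL instruction with src_reg == BPF_PSEUDO_CALL and imm holding a relative instruction offset, so the callee begins at i + imm + 1. A minimal illustrative program using the insn macros from include/linux/filter.h:

struct bpf_insn insns[] = {
	/* insn 0: pseudo call, callee starts at 0 + 1 + 1 = 2 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, 0, 1),
	/* insn 1: last insn of the main prog - must be exit or ja */
	BPF_EXIT_INSN(),
	/* insn 2: first insn of subprog #1 (recorded in subprog_starts) */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
};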
+
+static
+struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
+ const struct bpf_verifier_state *state,
+ struct bpf_verifier_state *parent,
+ u32 regno)
+{
+ struct bpf_verifier_state *tmp = NULL;
+
+ /* 'parent' could be a state of the caller and
+ * 'state' could be a state of the callee. In such a case
+ * parent->curframe < state->curframe
+ * and it's ok for r1 - r5 registers
+ *
+ * 'parent' could be a callee's state after it bpf_exit-ed.
+ * In such a case parent->curframe > state->curframe
+ * and it's ok for r0 only
+ */
+ if (parent->curframe == state->curframe ||
+ (parent->curframe < state->curframe &&
+ regno >= BPF_REG_1 && regno <= BPF_REG_5) ||
+ (parent->curframe > state->curframe &&
+ regno == BPF_REG_0))
+ return parent;
+
+ if (parent->curframe > state->curframe &&
+ regno >= BPF_REG_6) {
+ /* for callee saved regs we have to skip the whole chain
+ * of states that belong to callee and mark as LIVE_READ
+ * the registers before the call
+ */
+ tmp = parent;
+ while (tmp && tmp->curframe != state->curframe) {
+ tmp = tmp->parent;
+ }
+ if (!tmp)
+ goto bug;
+ parent = tmp;
+ } else {
+ goto bug;
+ }
+ return parent;
+bug:
+ verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
+ verbose(env, "regno %d parent frame %d current frame %d\n",
+ regno, parent->curframe, state->curframe);
+ return NULL;
+}
+
+static int mark_reg_read(struct bpf_verifier_env *env,
+ const struct bpf_verifier_state *state,
+ struct bpf_verifier_state *parent,
+ u32 regno)
{
- struct bpf_verifier_state *parent = state->parent;
+ bool writes = parent == state->parent; /* Observe write marks */
if (regno == BPF_REG_FP)
/* We don't need to worry about FP liveness because it's read-only */
- return;
+ return 0;
while (parent) {
/* if read wasn't screened by an earlier write ... */
- if (state->regs[regno].live & REG_LIVE_WRITTEN)
+ if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN)
break;
+ parent = skip_callee(env, state, parent, regno);
+ if (!parent)
+ return -EFAULT;
/* ... then we depend on parent's value */
- parent->regs[regno].live |= REG_LIVE_READ;
+ parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ;
state = parent;
parent = state->parent;
+ writes = true;
}
+ return 0;
}
static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
enum reg_arg_type t)
{
- struct bpf_reg_state *regs = env->cur_state->regs;
+ struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs;
if (regno >= MAX_BPF_REG) {
verbose(env, "R%d is invalid\n", regno);
@@ -655,7 +936,7 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
verbose(env, "R%d !read_ok\n", regno);
return -EACCES;
}
- mark_reg_read(env->cur_state, regno);
+ return mark_reg_read(env, vstate, vstate->parent, regno);
} else {
/* check whether register used as dest operand can be written to */
if (regno == BPF_REG_FP) {
@@ -686,17 +967,25 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
}
}
+/* Does this register contain a constant zero? */
+static bool register_is_null(struct bpf_reg_state *reg)
+{
+ return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
+}
+
/* check_stack_read/write functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
static int check_stack_write(struct bpf_verifier_env *env,
- struct bpf_verifier_state *state, int off,
- int size, int value_regno)
+ struct bpf_func_state *state, /* func where register points to */
+ int off, int size, int value_regno)
{
+ struct bpf_func_state *cur; /* state of the current function */
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
+ enum bpf_reg_type type;
- err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE),
- true);
+ err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
+ true);
if (err)
return err;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
@@ -709,8 +998,9 @@ static int check_stack_write(struct bpf_verifier_env *env,
return -EACCES;
}
+ cur = env->cur_state->frame[env->cur_state->curframe];
if (value_regno >= 0 &&
- is_spillable_regtype(state->regs[value_regno].type)) {
+ is_spillable_regtype((type = cur->regs[value_regno].type))) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
@@ -718,51 +1008,116 @@ static int check_stack_write(struct bpf_verifier_env *env,
return -EACCES;
}
+ if (state != cur && type == PTR_TO_STACK) {
+ verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
+ return -EINVAL;
+ }
+
/* save register state */
- state->stack[spi].spilled_ptr = state->regs[value_regno];
+ state->stack[spi].spilled_ptr = cur->regs[value_regno];
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
for (i = 0; i < BPF_REG_SIZE; i++)
state->stack[spi].slot_type[i] = STACK_SPILL;
} else {
+ u8 type = STACK_MISC;
+
/* regular write of data into stack */
state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
+ /* only mark the slot as written if all 8 bytes were written
+ * otherwise read propagation may incorrectly stop too soon
+ * when stack slots are partially written.
+ * This heuristic means that read propagation will be
+ * conservative, since it will add reg_live_read marks
+ * to stack slots all the way to the first state when a program
+ * writes+reads less than 8 bytes
+ */
+ if (size == BPF_REG_SIZE)
+ state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
+ /* when we zero initialize stack slots mark them as such */
+ if (value_regno >= 0 &&
+ register_is_null(&cur->regs[value_regno]))
+ type = STACK_ZERO;
+
for (i = 0; i < size; i++)
state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
- STACK_MISC;
+ type;
}
return 0;
}
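Concretely, only storing a register that the verifier already knows to be zero produces STACK_ZERO marks; a BPF_ST immediate store reaches this function with value_regno == -1 and stays STACK_MISC. An illustrative fragment (filter.h insn macros):

	BPF_MOV64_IMM(BPF_REG_2, 0),			/* R2 is a known const zero */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),	/* fp-8..fp-5 become STACK_ZERO */
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_10, -8),	/* all-zero read: R3 is known == 0 */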
-static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot)
+/* registers of every function are unique and mark_reg_read() propagates
+ * the liveness in the following cases:
+ * - from callee into caller for R1 - R5 that were used as arguments
+ * - from caller into callee for R0 that used as result of the call
+ * - from caller to the same caller skipping states of the callee for R6 - R9,
+ * since R6 - R9 are callee saved by implicit function prologue and
+ * caller's R6 != callee's R6, so when we propagate liveness up to
+ * parent states we need to skip callee states for R6 - R9.
+ *
+ * stack slot marking is different, since stacks of caller and callee are
+ * accessible in both (since caller can pass a pointer to caller's stack to
+ * callee which can pass it to another function), hence mark_stack_slot_read()
+ * has to propagate the stack liveness to all parent states at a given frame number.
+ * Consider code:
+ * f1() {
+ * ptr = fp - 8;
+ * *ptr = ctx;
+ * call f2 {
+ * .. = *ptr;
+ * }
+ * .. = *ptr;
+ * }
+ * First *ptr is reading from f1's stack and mark_stack_slot_read() has
+ * to mark liveness at f1's frame and not f2's frame.
+ * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has
+ * to propagate liveness to f2 states at f1's frame level and further into
+ * f1 states at f1's frame level until write into that stack slot
+ */
+static void mark_stack_slot_read(struct bpf_verifier_env *env,
+ const struct bpf_verifier_state *state,
+ struct bpf_verifier_state *parent,
+ int slot, int frameno)
{
- struct bpf_verifier_state *parent = state->parent;
+ bool writes = parent == state->parent; /* Observe write marks */
while (parent) {
+ if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE)
+ /* since LIVE_WRITTEN mark is only done for full 8-byte
+ * write the read marks are conservative and parent
+ * state may not even have the stack allocated. In such a case
+ * end the propagation, since the loop reached the beginning
+ * of the function
+ */
+ break;
/* if read wasn't screened by an earlier write ... */
- if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
+ if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
break;
/* ... then we depend on parent's value */
- parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
+ parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
state = parent;
parent = state->parent;
+ writes = true;
}
}
static int check_stack_read(struct bpf_verifier_env *env,
- struct bpf_verifier_state *state, int off, int size,
- int value_regno)
+ struct bpf_func_state *reg_state /* func where register points to */,
+ int off, int size, int value_regno)
{
+ struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
u8 *stype;
- if (state->allocated_stack <= slot) {
+ if (reg_state->allocated_stack <= slot) {
verbose(env, "invalid read from stack off %d+0 size %d\n",
off, size);
return -EACCES;
}
- stype = state->stack[spi].slot_type;
+ stype = reg_state->stack[spi].slot_type;
if (stype[0] == STACK_SPILL) {
if (size != BPF_REG_SIZE) {
@@ -778,21 +1133,44 @@ static int check_stack_read(struct bpf_verifier_env *env,
if (value_regno >= 0) {
/* restore register state from stack */
- state->regs[value_regno] = state->stack[spi].spilled_ptr;
- mark_stack_slot_read(state, spi);
+ state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
+ /* mark reg as written since spilled pointer state likely
+ * has its liveness marks cleared by is_state_visited()
+ * which resets stack/reg liveness for state transitions
+ */
+ state->regs[value_regno].live |= REG_LIVE_WRITTEN;
}
+ mark_stack_slot_read(env, vstate, vstate->parent, spi,
+ reg_state->frameno);
return 0;
} else {
+ int zeros = 0;
+
for (i = 0; i < size; i++) {
- if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) {
- verbose(env, "invalid read from stack off %d+%d size %d\n",
- off, i, size);
- return -EACCES;
+ if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
+ continue;
+ if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
+ zeros++;
+ continue;
+ }
+ verbose(env, "invalid read from stack off %d+%d size %d\n",
+ off, i, size);
+ return -EACCES;
+ }
+ mark_stack_slot_read(env, vstate, vstate->parent, spi,
+ reg_state->frameno);
+ if (value_regno >= 0) {
+ if (zeros == size) {
+ /* any size read into register is zero extended,
+ * so the whole register == const_zero
+ */
+ __mark_reg_const_zero(&state->regs[value_regno]);
+ } else {
+ /* have read misc data from the stack */
+ mark_reg_unknown(env, state->regs, value_regno);
}
+ state->regs[value_regno].live |= REG_LIVE_WRITTEN;
}
- if (value_regno >= 0)
- /* have read misc data from the stack */
- mark_reg_unknown(env, state->regs, value_regno);
return 0;
}
}
@@ -817,7 +1195,8 @@ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
int off, int size, bool zero_size_allowed)
{
- struct bpf_verifier_state *state = env->cur_state;
+ struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *reg = &state->regs[regno];
int err;
@@ -881,6 +1260,7 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
case BPF_PROG_TYPE_XDP:
case BPF_PROG_TYPE_LWT_XMIT:
case BPF_PROG_TYPE_SK_SKB:
+ case BPF_PROG_TYPE_SK_MSG:
if (meta)
return meta->pkt_access;
@@ -943,7 +1323,7 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
};
if (env->ops->is_valid_access &&
- env->ops->is_valid_access(off, size, t, &info)) {
+ env->ops->is_valid_access(off, size, t, env->prog, &info)) {
/* A non zero info.ctx_field_size indicates that this field is a
* candidate for later verifier transformation to load the whole
* field and then apply a mask when accessed with a narrower
@@ -985,6 +1365,13 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
return reg->type == PTR_TO_CTX;
}
+static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
+{
+ const struct bpf_reg_state *reg = cur_regs(env) + regno;
+
+ return type_is_pkt_pointer(reg->type);
+}
+
static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
int off, int size, bool strict)
@@ -1045,10 +1432,10 @@ static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
}
static int check_ptr_alignment(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg,
- int off, int size)
+ const struct bpf_reg_state *reg, int off,
+ int size, bool strict_alignment_once)
{
- bool strict = env->strict_alignment;
+ bool strict = env->strict_alignment || strict_alignment_once;
const char *pointer_desc = "";
switch (reg->type) {
@@ -1079,6 +1466,103 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
strict);
}
+static int update_stack_depth(struct bpf_verifier_env *env,
+ const struct bpf_func_state *func,
+ int off)
+{
+ u16 stack = env->subprog_stack_depth[func->subprogno];
+
+ if (stack >= -off)
+ return 0;
+
+ /* update known max for given subprogram */
+ env->subprog_stack_depth[func->subprogno] = -off;
+ return 0;
+}
+
+/* starting from main bpf function walk all instructions of the function
+ * and recursively walk all callees that given function can call.
+ * Ignore jump and exit insns.
+ * Since recursion is prevented by check_cfg() this algorithm
+ * only needs a local stack of MAX_CALL_FRAMES to remember callsites
+ */
+static int check_max_stack_depth(struct bpf_verifier_env *env)
+{
+ int depth = 0, frame = 0, subprog = 0, i = 0, subprog_end;
+ struct bpf_insn *insn = env->prog->insnsi;
+ int insn_cnt = env->prog->len;
+ int ret_insn[MAX_CALL_FRAMES];
+ int ret_prog[MAX_CALL_FRAMES];
+
+process_func:
+ /* round up to 32 bytes, since this is the granularity
+ * of the interpreter stack size
+ */
+ depth += round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+ if (depth > MAX_BPF_STACK) {
+ verbose(env, "combined stack size of %d calls is %d. Too large\n",
+ frame + 1, depth);
+ return -EACCES;
+ }
+continue_func:
+ if (env->subprog_cnt == subprog)
+ subprog_end = insn_cnt;
+ else
+ subprog_end = env->subprog_starts[subprog];
+ for (; i < subprog_end; i++) {
+ if (insn[i].code != (BPF_JMP | BPF_CALL))
+ continue;
+ if (insn[i].src_reg != BPF_PSEUDO_CALL)
+ continue;
+ /* remember insn and function to return to */
+ ret_insn[frame] = i + 1;
+ ret_prog[frame] = subprog;
+
+ /* find the callee */
+ i = i + insn[i].imm + 1;
+ subprog = find_subprog(env, i);
+ if (subprog < 0) {
+ WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+ i);
+ return -EFAULT;
+ }
+ subprog++;
+ frame++;
+ if (frame >= MAX_CALL_FRAMES) {
+ WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
+ return -EFAULT;
+ }
+ goto process_func;
+ }
+ /* end of for() loop means the last insn of the 'subprog'
+ * was reached. Doesn't matter whether it was JA or EXIT
+ */
+ if (frame == 0)
+ return 0;
+ depth -= round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+ frame--;
+ i = ret_insn[frame];
+ subprog = ret_prog[frame];
+ goto continue_func;
+}
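Worked example of the accounting above, assuming three illustrative stack depths: main uses 120 bytes, calls A (200 bytes), and A calls B (180 bytes).

	main: round_up(120, 32) = 128
	A:    round_up(200, 32) = 224
	B:    round_up(180, 32) = 192
	main -> A -> B: 128 + 224 + 192 = 544 > MAX_BPF_STACK (512), so -EACCES;
	without the A -> B call the total is 128 + 224 = 352 and the check passes.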
+
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+static int get_callee_stack_depth(struct bpf_verifier_env *env,
+ const struct bpf_insn *insn, int idx)
+{
+ int start = idx + insn->imm + 1, subprog;
+
+ subprog = find_subprog(env, start);
+ if (subprog < 0) {
+ WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+ start);
+ return -EFAULT;
+ }
+ subprog++;
+ return env->subprog_stack_depth[subprog];
+}
+#endif
+
/* truncate register to smaller size (in bytes)
* must be called with size < BPF_REG_SIZE
*/
@@ -1108,13 +1592,13 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
* if t==write && value_regno==-1, some unknown value is stored into memory
* if t==read && value_regno==-1, don't care what we read from memory
*/
-static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
- int bpf_size, enum bpf_access_type t,
- int value_regno)
+static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
+ int off, int bpf_size, enum bpf_access_type t,
+ int value_regno, bool strict_alignment_once)
{
- struct bpf_verifier_state *state = env->cur_state;
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = regs + regno;
+ struct bpf_func_state *state;
int size, err = 0;
size = bpf_size_to_bytes(bpf_size);
@@ -1122,7 +1606,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
return size;
/* alignment checks will add in reg->off themselves */
- err = check_ptr_alignment(env, reg, off, size);
+ err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
if (err)
return err;
@@ -1203,8 +1687,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
return -EACCES;
}
- if (env->prog->aux->stack_depth < -off)
- env->prog->aux->stack_depth = -off;
+ state = func(env, reg);
+ err = update_stack_depth(env, state, off);
+ if (err)
+ return err;
if (t == BPF_WRITE)
err = check_stack_write(env, state, off, size,
@@ -1265,27 +1751,23 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
return -EACCES;
}
- if (is_ctx_reg(env, insn->dst_reg)) {
- verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
- insn->dst_reg);
+ if (is_ctx_reg(env, insn->dst_reg) ||
+ is_pkt_reg(env, insn->dst_reg)) {
+ verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
+ insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ?
+ "context" : "packet");
return -EACCES;
}
/* check whether atomic_add can read the memory */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
- BPF_SIZE(insn->code), BPF_READ, -1);
+ BPF_SIZE(insn->code), BPF_READ, -1, true);
if (err)
return err;
/* check whether atomic_add can write into the same memory */
return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
- BPF_SIZE(insn->code), BPF_WRITE, -1);
-}
-
-/* Does this register contain a constant zero? */
-static bool register_is_null(struct bpf_reg_state reg)
-{
- return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0);
+ BPF_SIZE(insn->code), BPF_WRITE, -1, true);
}
/* when register 'regno' is passed into function that will read 'access_size'
@@ -1298,32 +1780,32 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
- struct bpf_verifier_state *state = env->cur_state;
- struct bpf_reg_state *regs = state->regs;
+ struct bpf_reg_state *reg = cur_regs(env) + regno;
+ struct bpf_func_state *state = func(env, reg);
int off, i, slot, spi;
- if (regs[regno].type != PTR_TO_STACK) {
+ if (reg->type != PTR_TO_STACK) {
/* Allow zero-byte read from NULL, regardless of pointer type */
if (zero_size_allowed && access_size == 0 &&
- register_is_null(regs[regno]))
+ register_is_null(reg))
return 0;
verbose(env, "R%d type=%s expected=%s\n", regno,
- reg_type_str[regs[regno].type],
+ reg_type_str[reg->type],
reg_type_str[PTR_TO_STACK]);
return -EACCES;
}
/* Only allow fixed-offset stack reads */
- if (!tnum_is_const(regs[regno].var_off)) {
+ if (!tnum_is_const(reg->var_off)) {
char tn_buf[48];
- tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "invalid variable stack read R%d var_off=%s\n",
regno, tn_buf);
return -EACCES;
}
- off = regs[regno].off + regs[regno].var_off.value;
+ off = reg->off + reg->var_off.value;
if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
@@ -1331,9 +1813,6 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
return -EACCES;
}
- if (env->prog->aux->stack_depth < -off)
- env->prog->aux->stack_depth = -off;
-
if (meta && meta->raw_mode) {
meta->access_size = access_size;
meta->regno = regno;
@@ -1341,17 +1820,32 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
}
for (i = 0; i < access_size; i++) {
+ u8 *stype;
+
slot = -(off + i) - 1;
spi = slot / BPF_REG_SIZE;
- if (state->allocated_stack <= slot ||
- state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
- STACK_MISC) {
- verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
- off, i, access_size);
- return -EACCES;
+ if (state->allocated_stack <= slot)
+ goto err;
+ stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
+ if (*stype == STACK_MISC)
+ goto mark;
+ if (*stype == STACK_ZERO) {
+ /* helper can write anything into the stack */
+ *stype = STACK_MISC;
+ goto mark;
}
+err:
+ verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
+ off, i, access_size);
+ return -EACCES;
+mark:
+ /* reading any byte out of 8-byte 'spill_slot' will cause
+ * the whole slot to be marked as 'read'
+ */
+ mark_stack_slot_read(env, env->cur_state, env->cur_state->parent,
+ spi, state->frameno);
}
- return 0;
+ return update_stack_depth(env, state, off);
}
static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
@@ -1374,6 +1868,19 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
}
}
+static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
+{
+ return type == ARG_PTR_TO_MEM ||
+ type == ARG_PTR_TO_MEM_OR_NULL ||
+ type == ARG_PTR_TO_UNINIT_MEM;
+}
+
+static bool arg_type_is_mem_size(enum bpf_arg_type type)
+{
+ return type == ARG_CONST_SIZE ||
+ type == ARG_CONST_SIZE_OR_ZERO;
+}
+
static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
enum bpf_arg_type arg_type,
struct bpf_call_arg_meta *meta)
@@ -1423,15 +1930,13 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
expected_type = PTR_TO_CTX;
if (type != expected_type)
goto err_type;
- } else if (arg_type == ARG_PTR_TO_MEM ||
- arg_type == ARG_PTR_TO_MEM_OR_NULL ||
- arg_type == ARG_PTR_TO_UNINIT_MEM) {
+ } else if (arg_type_is_mem_ptr(arg_type)) {
expected_type = PTR_TO_STACK;
/* One exception here. In case function allows for NULL to be
* passed in as argument, it's a SCALAR_VALUE type. Final test
* happens during stack boundary checking.
*/
- if (register_is_null(*reg) &&
+ if (register_is_null(reg) &&
arg_type == ARG_PTR_TO_MEM_OR_NULL)
/* final test in check_stack_boundary() */;
else if (!type_is_pkt_pointer(type) &&
@@ -1486,25 +1991,12 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
err = check_stack_boundary(env, regno,
meta->map_ptr->value_size,
false, NULL);
- } else if (arg_type == ARG_CONST_SIZE ||
- arg_type == ARG_CONST_SIZE_OR_ZERO) {
+ } else if (arg_type_is_mem_size(arg_type)) {
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
- /* bpf_xxx(..., buf, len) call will access 'len' bytes
- * from stack pointer 'buf'. Check it
- * note: regno == len, regno - 1 == buf
- */
- if (regno == 0) {
- /* kernel subsystem misconfigured verifier */
- verbose(env,
- "ARG_CONST_SIZE cannot be first argument\n");
- return -EACCES;
- }
-
/* The register is SCALAR_VALUE; the access check
* happens using its boundaries.
*/
-
if (!tnum_is_const(reg->var_off))
/* For unprivileged variable accesses, disable raw
* mode so that the program is required to
@@ -1592,7 +2084,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_SOCKMAP:
if (func_id != BPF_FUNC_sk_redirect_map &&
func_id != BPF_FUNC_sock_map_update &&
- func_id != BPF_FUNC_map_delete_elem)
+ func_id != BPF_FUNC_map_delete_elem &&
+ func_id != BPF_FUNC_msg_redirect_map)
goto error;
break;
default:
@@ -1604,6 +2097,10 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_FUNC_tail_call:
if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
goto error;
+ if (env->subprog_cnt) {
+ verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
+ return -EINVAL;
+ }
break;
case BPF_FUNC_perf_event_read:
case BPF_FUNC_perf_event_output:
@@ -1626,6 +2123,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
goto error;
break;
case BPF_FUNC_sk_redirect_map:
+ case BPF_FUNC_msg_redirect_map:
if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
goto error;
break;
@@ -1644,7 +2142,7 @@ error:
return -EINVAL;
}
-static int check_raw_mode(const struct bpf_func_proto *fn)
+static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
{
int count = 0;
@@ -1659,15 +2157,52 @@ static int check_raw_mode(const struct bpf_func_proto *fn)
if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
count++;
- return count > 1 ? -EINVAL : 0;
+ /* We only support one arg being in raw mode at the moment,
+ * which is sufficient for the helper functions we have
+ * right now.
+ */
+ return count <= 1;
+}
+
+static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
+ enum bpf_arg_type arg_next)
+{
+ return (arg_type_is_mem_ptr(arg_curr) &&
+ !arg_type_is_mem_size(arg_next)) ||
+ (!arg_type_is_mem_ptr(arg_curr) &&
+ arg_type_is_mem_size(arg_next));
+}
+
+static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
+{
+ /* bpf_xxx(..., buf, len) call will access 'len'
+ * bytes from memory 'buf'. Both arg types need
+ * to be paired, so make sure there's no buggy
+ * helper function specification.
+ */
+ if (arg_type_is_mem_size(fn->arg1_type) ||
+ arg_type_is_mem_ptr(fn->arg5_type) ||
+ check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
+ check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
+ check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
+ check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
+ return false;
+
+ return true;
+}
+
+static int check_func_proto(const struct bpf_func_proto *fn)
+{
+ return check_raw_mode_ok(fn) &&
+ check_arg_pair_ok(fn) ? 0 : -EINVAL;
}
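
For context, a sketch that is not part of this diff: check_arg_pair_ok() requires every mem-pointer argument to be immediately followed by its size argument, and check_raw_mode_ok() allows at most one ARG_PTR_TO_UNINIT_MEM. A helper prototype that satisfies both looks roughly like the kernel's existing bpf_probe_read proto, where arg1/arg2 form the pointer/size pair:

	static const struct bpf_func_proto example_probe_read_proto = {
		.func		= bpf_probe_read,	/* assumes the in-kernel helper */
		.gpl_only	= true,
		.ret_type	= RET_INTEGER,
		.arg1_type	= ARG_PTR_TO_UNINIT_MEM, /* mem pointer (raw mode) ... */
		.arg2_type	= ARG_CONST_SIZE,	 /* ... paired with its size */
		.arg3_type	= ARG_ANYTHING,
	};
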
/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
* are now invalid, so turn them into unknown SCALAR_VALUE.
*/
-static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
+static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
+ struct bpf_func_state *state)
{
- struct bpf_verifier_state *state = env->cur_state;
struct bpf_reg_state *regs = state->regs, *reg;
int i;
@@ -1684,7 +2219,121 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
}
}
-static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
+static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
+{
+ struct bpf_verifier_state *vstate = env->cur_state;
+ int i;
+
+ for (i = 0; i <= vstate->curframe; i++)
+ __clear_all_pkt_pointers(env, vstate->frame[i]);
+}
+
+static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ int *insn_idx)
+{
+ struct bpf_verifier_state *state = env->cur_state;
+ struct bpf_func_state *caller, *callee;
+ int i, subprog, target_insn;
+
+ if (state->curframe + 1 >= MAX_CALL_FRAMES) {
+ verbose(env, "the call stack of %d frames is too deep\n",
+ state->curframe + 2);
+ return -E2BIG;
+ }
+
+ target_insn = *insn_idx + insn->imm;
+ subprog = find_subprog(env, target_insn + 1);
+ if (subprog < 0) {
+ verbose(env, "verifier bug. No program starts at insn %d\n",
+ target_insn + 1);
+ return -EFAULT;
+ }
+
+ caller = state->frame[state->curframe];
+ if (state->frame[state->curframe + 1]) {
+ verbose(env, "verifier bug. Frame %d already allocated\n",
+ state->curframe + 1);
+ return -EFAULT;
+ }
+
+ callee = kzalloc(sizeof(*callee), GFP_KERNEL);
+ if (!callee)
+ return -ENOMEM;
+ state->frame[state->curframe + 1] = callee;
+
+ /* callee cannot access r0, r6 - r9 for reading and has to write
+ * into its own stack before reading from it.
+ * callee can read/write into caller's stack
+ */
+ init_func_state(env, callee,
+ /* remember the callsite, it will be used by bpf_exit */
+ *insn_idx /* callsite */,
+ state->curframe + 1 /* frameno within this callchain */,
+ subprog + 1 /* subprog number within this prog */);
+
+ /* copy r1 - r5 args that callee can access */
+ for (i = BPF_REG_1; i <= BPF_REG_5; i++)
+ callee->regs[i] = caller->regs[i];
+
+ /* after the call registers r0 - r5 were scratched */
+ for (i = 0; i < CALLER_SAVED_REGS; i++) {
+ mark_reg_not_init(env, caller->regs, caller_saved[i]);
+ check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
+ }
+
+ /* only increment it after check_reg_arg() finished */
+ state->curframe++;
+
+ /* and go analyze first insn of the callee */
+ *insn_idx = target_insn;
+
+ if (env->log.level) {
+ verbose(env, "caller:\n");
+ print_verifier_state(env, caller);
+ verbose(env, "callee:\n");
+ print_verifier_state(env, callee);
+ }
+ return 0;
+}
+
+static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
+{
+ struct bpf_verifier_state *state = env->cur_state;
+ struct bpf_func_state *caller, *callee;
+ struct bpf_reg_state *r0;
+
+ callee = state->frame[state->curframe];
+ r0 = &callee->regs[BPF_REG_0];
+ if (r0->type == PTR_TO_STACK) {
+ /* technically it's ok to return caller's stack pointer
+ * (or caller's caller's pointer) back to the caller,
+ * since these pointers are valid. Only current stack
+ * pointer will be invalid as soon as function exits,
+ * but let's be conservative
+ */
+ verbose(env, "cannot return stack pointer to the caller\n");
+ return -EINVAL;
+ }
+
+ state->curframe--;
+ caller = state->frame[state->curframe];
+ /* return to the caller whatever r0 had in the callee */
+ caller->regs[BPF_REG_0] = *r0;
+
+ *insn_idx = callee->callsite + 1;
+ if (env->log.level) {
+ verbose(env, "returning from callee:\n");
+ print_verifier_state(env, callee);
+ verbose(env, "to caller at %d:\n", *insn_idx);
+ print_verifier_state(env, caller);
+ }
+ /* clear everything in the callee */
+ free_func_state(callee);
+ state->frame[state->curframe + 1] = NULL;
+ return 0;
+}
+
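
Illustrative only, not part of this diff: how a bpf-to-bpf call that exercises check_func_call() and prepare_func_exit() is encoded, assuming the selftest-style insn macros from include/linux/filter.h. src_reg == BPF_PSEUDO_CALL distinguishes it from a helper call, and imm is the callee's offset relative to the next insn.

	struct bpf_insn prog_with_subprog[] = {
		BPF_MOV64_IMM(BPF_REG_1, 1),
		/* insn 1: call +1, callee starts at insn 3 */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, 0, 1),
		BPF_EXIT_INSN(),	/* insn 2: main prog exit, r0 comes from the callee */
		/* subprog 1, verified in its own bpf_func_state frame */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
		BPF_EXIT_INSN(),	/* prepare_func_exit() resumes at insn 2 */
	};
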
+static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
{
const struct bpf_func_proto *fn = NULL;
struct bpf_reg_state *regs;
@@ -1700,8 +2349,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
}
if (env->ops->get_func_proto)
- fn = env->ops->get_func_proto(func_id);
-
+ fn = env->ops->get_func_proto(func_id, env->prog);
if (!fn) {
verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
func_id);
@@ -1725,10 +2373,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
memset(&meta, 0, sizeof(meta));
meta.pkt_access = fn->pkt_access;
- /* We only support one arg being in raw mode at the moment, which
- * is sufficient for the helper functions we have right now.
- */
- err = check_raw_mode(fn);
+ err = check_func_proto(fn);
if (err) {
verbose(env, "kernel subsystem misconfigured func %s#%d\n",
func_id_name(func_id), func_id);
@@ -1763,7 +2408,8 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
* is inferred from register state.
*/
for (i = 0; i < meta.access_size; i++) {
- err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
+ err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
+ BPF_WRITE, -1, false);
if (err)
return err;
}
@@ -1884,7 +2530,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
const struct bpf_reg_state *ptr_reg,
const struct bpf_reg_state *off_reg)
{
- struct bpf_reg_state *regs = cur_regs(env), *dst_reg;
+ struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs, *dst_reg;
bool known = tnum_is_const(off_reg->var_off);
s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
@@ -2319,7 +2967,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{
- struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
+ struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
u8 opcode = BPF_OP(insn->code);
@@ -2370,12 +3020,12 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
/* Got here implies adding two SCALAR_VALUEs */
if (WARN_ON_ONCE(ptr_reg)) {
- print_verifier_state(env, env->cur_state);
+ print_verifier_state(env, state);
verbose(env, "verifier internal error: unexpected ptr_reg\n");
return -EINVAL;
}
if (WARN_ON(!src_reg)) {
- print_verifier_state(env, env->cur_state);
+ print_verifier_state(env, state);
verbose(env, "verifier internal error: no src_reg\n");
return -EINVAL;
}
@@ -2537,14 +3187,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return 0;
}
-static void find_good_pkt_pointers(struct bpf_verifier_state *state,
+static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
struct bpf_reg_state *dst_reg,
enum bpf_reg_type type,
bool range_right_open)
{
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *reg;
u16 new_range;
- int i;
+ int i, j;
if (dst_reg->off < 0 ||
(dst_reg->off == 0 && range_right_open))
@@ -2614,12 +3265,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
/* keep the maximum range already checked */
regs[i].range = max(regs[i].range, new_range);
- for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
- if (state->stack[i].slot_type[0] != STACK_SPILL)
- continue;
- reg = &state->stack[i].spilled_ptr;
- if (reg->type == type && reg->id == dst_reg->id)
- reg->range = max(reg->range, new_range);
+ for (j = 0; j <= vstate->curframe; j++) {
+ state = vstate->frame[j];
+ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+ if (state->stack[i].slot_type[0] != STACK_SPILL)
+ continue;
+ reg = &state->stack[i].spilled_ptr;
+ if (reg->type == type && reg->id == dst_reg->id)
+ reg->range = max(reg->range, new_range);
+ }
}
}
@@ -2857,20 +3511,24 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
/* The logic is similar to find_good_pkt_pointers(), both could eventually
* be folded together at some point.
*/
-static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
+static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno,
bool is_null)
{
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs;
u32 id = regs[regno].id;
- int i;
+ int i, j;
for (i = 0; i < MAX_BPF_REG; i++)
mark_map_reg(regs, i, id, is_null);
- for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
- if (state->stack[i].slot_type[0] != STACK_SPILL)
- continue;
- mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
+ for (j = 0; j <= vstate->curframe; j++) {
+ state = vstate->frame[j];
+ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+ if (state->stack[i].slot_type[0] != STACK_SPILL)
+ continue;
+ mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
+ }
}
}
@@ -2970,8 +3628,10 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{
- struct bpf_verifier_state *other_branch, *this_branch = env->cur_state;
- struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
+ struct bpf_verifier_state *this_branch = env->cur_state;
+ struct bpf_verifier_state *other_branch;
+ struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
+ struct bpf_reg_state *dst_reg, *other_branch_regs;
u8 opcode = BPF_OP(insn->code);
int err;
@@ -3014,8 +3674,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
if (BPF_SRC(insn->code) == BPF_K &&
(opcode == BPF_JEQ || opcode == BPF_JNE) &&
dst_reg->type == SCALAR_VALUE &&
- tnum_equals_const(dst_reg->var_off, insn->imm)) {
- if (opcode == BPF_JEQ) {
+ tnum_is_const(dst_reg->var_off)) {
+ if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
+ (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
/* if (imm == imm) goto pc+off;
* only follow the goto, ignore fall-through
*/
@@ -3033,6 +3694,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
if (!other_branch)
return -EFAULT;
+ other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
/* detect if we are comparing against a constant value so we can adjust
* our min/max values for our dst register.
@@ -3045,22 +3707,22 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
if (dst_reg->type == SCALAR_VALUE &&
regs[insn->src_reg].type == SCALAR_VALUE) {
if (tnum_is_const(regs[insn->src_reg].var_off))
- reg_set_min_max(&other_branch->regs[insn->dst_reg],
+ reg_set_min_max(&other_branch_regs[insn->dst_reg],
dst_reg, regs[insn->src_reg].var_off.value,
opcode);
else if (tnum_is_const(dst_reg->var_off))
- reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
+ reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
&regs[insn->src_reg],
dst_reg->var_off.value, opcode);
else if (opcode == BPF_JEQ || opcode == BPF_JNE)
/* Comparing for equality, we can combine knowledge */
- reg_combine_min_max(&other_branch->regs[insn->src_reg],
- &other_branch->regs[insn->dst_reg],
+ reg_combine_min_max(&other_branch_regs[insn->src_reg],
+ &other_branch_regs[insn->dst_reg],
&regs[insn->src_reg],
&regs[insn->dst_reg], opcode);
}
} else if (dst_reg->type == SCALAR_VALUE) {
- reg_set_min_max(&other_branch->regs[insn->dst_reg],
+ reg_set_min_max(&other_branch_regs[insn->dst_reg],
dst_reg, insn->imm, opcode);
}
@@ -3081,7 +3743,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
return -EACCES;
}
if (env->log.level)
- print_verifier_state(env, this_branch);
+ print_verifier_state(env, this_branch->frame[this_branch->curframe]);
return 0;
}
@@ -3166,6 +3828,18 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EINVAL;
}
+ if (env->subprog_cnt) {
+ /* when a program has an LD_ABS insn, JITs and the interpreter assume
+ * that r1 == ctx == skb, which is not the case for callees
+ * that can have arbitrary arguments. It's problematic
+ * for the main prog as well, since JITs would need to analyze
+ * all functions in order to make proper register save/restore
+ * decisions in the main prog. Hence disallow LD_ABS with calls.
+ */
+ verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
+ return -EINVAL;
+ }
+
if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
BPF_SIZE(insn->code) == BPF_DW ||
(mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
@@ -3213,6 +3887,7 @@ static int check_return_code(struct bpf_verifier_env *env)
switch (env->prog->type) {
case BPF_PROG_TYPE_CGROUP_SKB:
case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
case BPF_PROG_TYPE_SOCK_OPS:
case BPF_PROG_TYPE_CGROUP_DEVICE:
break;
@@ -3342,6 +4017,10 @@ static int check_cfg(struct bpf_verifier_env *env)
int ret = 0;
int i, t;
+ ret = check_subprogs(env);
+ if (ret < 0)
+ return ret;
+
insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_state)
return -ENOMEM;
@@ -3374,6 +4053,14 @@ peek_stack:
goto err_free;
if (t + 1 < insn_cnt)
env->explored_states[t + 1] = STATE_LIST_MARK;
+ if (insns[t].src_reg == BPF_PSEUDO_CALL) {
+ env->explored_states[t] = STATE_LIST_MARK;
+ ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
+ if (ret == 1)
+ goto peek_stack;
+ else if (ret < 0)
+ goto err_free;
+ }
} else if (opcode == BPF_JA) {
if (BPF_SRC(insns[t].code) != BPF_K) {
ret = -EINVAL;
@@ -3492,11 +4179,21 @@ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
struct idpair *idmap)
{
+ bool equal;
+
if (!(rold->live & REG_LIVE_READ))
/* explored state didn't use this */
return true;
- if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0)
+ equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0;
+
+ if (rold->type == PTR_TO_STACK)
+ /* two stack pointers are equal only if they're pointing to
+ * the same stack frame, since fp-8 in foo != fp-8 in bar
+ */
+ return equal && rold->frameno == rcur->frameno;
+
+ if (equal)
return true;
if (rold->type == NOT_INIT)
@@ -3568,7 +4265,6 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
tnum_in(rold->var_off, rcur->var_off);
case PTR_TO_CTX:
case CONST_PTR_TO_MAP:
- case PTR_TO_STACK:
case PTR_TO_PACKET_END:
/* Only valid matches are exact, which memcmp() above
* would have accepted
@@ -3583,8 +4279,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
return false;
}
-static bool stacksafe(struct bpf_verifier_state *old,
- struct bpf_verifier_state *cur,
+static bool stacksafe(struct bpf_func_state *old,
+ struct bpf_func_state *cur,
struct idpair *idmap)
{
int i, spi;
@@ -3602,8 +4298,19 @@ static bool stacksafe(struct bpf_verifier_state *old,
for (i = 0; i < old->allocated_stack; i++) {
spi = i / BPF_REG_SIZE;
+ if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
+ /* explored state didn't use this */
+ continue;
+
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
continue;
+ /* if old state was safe with misc data in the stack
+ * it will be safe with zero-initialized stack.
+ * The opposite is not true
+ */
+ if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
+ cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
+ continue;
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
cur->stack[spi].slot_type[i % BPF_REG_SIZE])
/* Ex: old explored (safe) state has STACK_SPILL in
@@ -3660,9 +4367,8 @@ static bool stacksafe(struct bpf_verifier_state *old,
* whereas register type in current state is meaningful, it means that
* the current state will reach 'bpf_exit' instruction safely
*/
-static bool states_equal(struct bpf_verifier_env *env,
- struct bpf_verifier_state *old,
- struct bpf_verifier_state *cur)
+static bool func_states_equal(struct bpf_func_state *old,
+ struct bpf_func_state *cur)
{
struct idpair *idmap;
bool ret = false;
@@ -3686,71 +4392,72 @@ out_free:
return ret;
}
+static bool states_equal(struct bpf_verifier_env *env,
+ struct bpf_verifier_state *old,
+ struct bpf_verifier_state *cur)
+{
+ int i;
+
+ if (old->curframe != cur->curframe)
+ return false;
+
+ /* for states to be equal callsites have to be the same
+ * and all frame states need to be equivalent
+ */
+ for (i = 0; i <= old->curframe; i++) {
+ if (old->frame[i]->callsite != cur->frame[i]->callsite)
+ return false;
+ if (!func_states_equal(old->frame[i], cur->frame[i]))
+ return false;
+ }
+ return true;
+}
+
/* A write screens off any subsequent reads; but write marks come from the
- * straight-line code between a state and its parent. When we arrive at a
- * jump target (in the first iteration of the propagate_liveness() loop),
- * we didn't arrive by the straight-line code, so read marks in state must
- * propagate to parent regardless of state's write marks.
+ * straight-line code between a state and its parent. When we arrive at an
+ * equivalent state (jump target or such) we didn't arrive by the straight-line
+ * code, so read marks in the state must propagate to the parent regardless
+ * of the state's write marks. That's what 'parent == state->parent' comparison
+ * in mark_reg_read() and mark_stack_slot_read() is for.
*/
-static bool do_propagate_liveness(const struct bpf_verifier_state *state,
- struct bpf_verifier_state *parent)
+static int propagate_liveness(struct bpf_verifier_env *env,
+ const struct bpf_verifier_state *vstate,
+ struct bpf_verifier_state *vparent)
{
- bool writes = parent == state->parent; /* Observe write marks */
- bool touched = false; /* any changes made? */
- int i;
+ int i, frame, err = 0;
+ struct bpf_func_state *state, *parent;
- if (!parent)
- return touched;
+ if (vparent->curframe != vstate->curframe) {
+ WARN(1, "propagate_live: parent frame %d current frame %d\n",
+ vparent->curframe, vstate->curframe);
+ return -EFAULT;
+ }
/* Propagate read liveness of registers... */
BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
/* We don't need to worry about FP liveness because it's read-only */
for (i = 0; i < BPF_REG_FP; i++) {
- if (parent->regs[i].live & REG_LIVE_READ)
- continue;
- if (writes && (state->regs[i].live & REG_LIVE_WRITTEN))
+ if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
continue;
- if (state->regs[i].live & REG_LIVE_READ) {
- parent->regs[i].live |= REG_LIVE_READ;
- touched = true;
+ if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
+ err = mark_reg_read(env, vstate, vparent, i);
+ if (err)
+ return err;
}
}
+
/* ... and stack slots */
- for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
- i < parent->allocated_stack / BPF_REG_SIZE; i++) {
- if (parent->stack[i].slot_type[0] != STACK_SPILL)
- continue;
- if (state->stack[i].slot_type[0] != STACK_SPILL)
- continue;
- if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
- continue;
- if (writes &&
- (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN))
- continue;
- if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) {
- parent->stack[i].spilled_ptr.live |= REG_LIVE_READ;
- touched = true;
+ for (frame = 0; frame <= vstate->curframe; frame++) {
+ state = vstate->frame[frame];
+ parent = vparent->frame[frame];
+ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
+ i < parent->allocated_stack / BPF_REG_SIZE; i++) {
+ if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
+ continue;
+ if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
+ mark_stack_slot_read(env, vstate, vparent, i, frame);
}
}
- return touched;
-}
-
-/* "parent" is "a state from which we reach the current state", but initially
- * it is not the state->parent (i.e. "the state whose straight-line code leads
- * to the current state"), instead it is the state that happened to arrive at
- * a (prunable) equivalent of the current state. See comment above
- * do_propagate_liveness() for consequences of this.
- * This function is just a more efficient way of calling mark_reg_read() or
- * mark_stack_slot_read() on each reg in "parent" that is read in "state",
- * though it requires that parent != state->parent in the call arguments.
- */
-static void propagate_liveness(const struct bpf_verifier_state *state,
- struct bpf_verifier_state *parent)
-{
- while (do_propagate_liveness(state, parent)) {
- /* Something changed, so we need to feed those changes onward */
- state = parent;
- parent = state->parent;
- }
+ return err;
}
static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
@@ -3758,7 +4465,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
struct bpf_verifier_state_list *new_sl;
struct bpf_verifier_state_list *sl;
struct bpf_verifier_state *cur = env->cur_state;
- int i, err;
+ int i, j, err;
sl = env->explored_states[insn_idx];
if (!sl)
@@ -3779,7 +4486,9 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
* they'll be immediately forgotten as we're pruning
* this state and will pop a new one.
*/
- propagate_liveness(&sl->state, cur);
+ err = propagate_liveness(env, &sl->state, cur);
+ if (err)
+ return err;
return 1;
}
sl = sl->next;
@@ -3787,9 +4496,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
/* there were no equivalent states, remember current one.
* technically the current state is not proven to be safe yet,
- * but it will either reach bpf_exit (which means it's safe) or
- * it will be rejected. Since there are no loops, we won't be
- * seeing this 'insn_idx' instruction again on the way to bpf_exit
+ * but it will either reach the outermost bpf_exit (which means it's safe)
+ * or it will be rejected. Since there are no loops, we won't be
+ * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
+ * again on the way to bpf_exit
*/
new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
if (!new_sl)
@@ -3813,19 +4523,15 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
* explored_states can get read marks.)
*/
for (i = 0; i < BPF_REG_FP; i++)
- cur->regs[i].live = REG_LIVE_NONE;
- for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++)
- if (cur->stack[i].slot_type[0] == STACK_SPILL)
- cur->stack[i].spilled_ptr.live = REG_LIVE_NONE;
- return 0;
-}
+ cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
-static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
- int insn_idx, int prev_insn_idx)
-{
- if (env->dev_ops && env->dev_ops->insn_hook)
- return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
+ /* all stack frames are accessible from callee, clear them all */
+ for (j = 0; j <= cur->curframe; j++) {
+ struct bpf_func_state *frame = cur->frame[j];
+ for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++)
+ frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
+ }
return 0;
}
@@ -3834,7 +4540,7 @@ static int do_check(struct bpf_verifier_env *env)
struct bpf_verifier_state *state;
struct bpf_insn *insns = env->prog->insnsi;
struct bpf_reg_state *regs;
- int insn_cnt = env->prog->len;
+ int insn_cnt = env->prog->len, i;
int insn_idx, prev_insn_idx = 0;
int insn_processed = 0;
bool do_print_state = false;
@@ -3842,9 +4548,18 @@ static int do_check(struct bpf_verifier_env *env)
state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
if (!state)
return -ENOMEM;
- env->cur_state = state;
- init_reg_state(env, state->regs);
+ state->curframe = 0;
state->parent = NULL;
+ state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
+ if (!state->frame[0]) {
+ kfree(state);
+ return -ENOMEM;
+ }
+ env->cur_state = state;
+ init_func_state(env, state->frame[0],
+ BPF_MAIN_FUNC /* callsite */,
+ 0 /* frameno */,
+ 0 /* subprogno, zero == main subprog */);
insn_idx = 0;
for (;;) {
struct bpf_insn *insn;
@@ -3891,19 +4606,26 @@ static int do_check(struct bpf_verifier_env *env)
else
verbose(env, "\nfrom %d to %d:",
prev_insn_idx, insn_idx);
- print_verifier_state(env, state);
+ print_verifier_state(env, state->frame[state->curframe]);
do_print_state = false;
}
if (env->log.level) {
+ const struct bpf_insn_cbs cbs = {
+ .cb_print = verbose,
+ .private_data = env,
+ };
+
verbose(env, "%d: ", insn_idx);
- print_bpf_insn(verbose, env, insn,
- env->allow_ptr_leaks);
+ print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
}
- err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
- if (err)
- return err;
+ if (bpf_prog_is_dev_bound(env->prog->aux)) {
+ err = bpf_prog_offload_verify_insn(env, insn_idx,
+ prev_insn_idx);
+ if (err)
+ return err;
+ }
regs = cur_regs(env);
env->insn_aux_data[insn_idx].seen = true;
@@ -3933,7 +4655,7 @@ static int do_check(struct bpf_verifier_env *env)
*/
err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ,
- insn->dst_reg);
+ insn->dst_reg, false);
if (err)
return err;
@@ -3985,7 +4707,7 @@ static int do_check(struct bpf_verifier_env *env)
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
- insn->src_reg);
+ insn->src_reg, false);
if (err)
return err;
@@ -4020,7 +4742,7 @@ static int do_check(struct bpf_verifier_env *env)
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
- -1);
+ -1, false);
if (err)
return err;
@@ -4030,13 +4752,17 @@ static int do_check(struct bpf_verifier_env *env)
if (opcode == BPF_CALL) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->off != 0 ||
- insn->src_reg != BPF_REG_0 ||
+ (insn->src_reg != BPF_REG_0 &&
+ insn->src_reg != BPF_PSEUDO_CALL) ||
insn->dst_reg != BPF_REG_0) {
verbose(env, "BPF_CALL uses reserved fields\n");
return -EINVAL;
}
- err = check_call(env, insn->imm, insn_idx);
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ err = check_func_call(env, insn, &insn_idx);
+ else
+ err = check_helper_call(env, insn->imm, insn_idx);
if (err)
return err;
@@ -4061,6 +4787,16 @@ static int do_check(struct bpf_verifier_env *env)
return -EINVAL;
}
+ if (state->curframe) {
+ /* exit from nested function */
+ prev_insn_idx = insn_idx;
+ err = prepare_func_exit(env, &insn_idx);
+ if (err)
+ return err;
+ do_print_state = true;
+ continue;
+ }
+
/* eBPF calling convention is such that R0 is used
* to return the value from eBPF program.
* Make sure that it's readable at this time
@@ -4121,8 +4857,17 @@ process_bpf_exit:
insn_idx++;
}
- verbose(env, "processed %d insns, stack depth %d\n", insn_processed,
- env->prog->aux->stack_depth);
+ verbose(env, "processed %d insns (limit %d), stack depth ",
+ insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
+ for (i = 0; i < env->subprog_cnt + 1; i++) {
+ u32 depth = env->subprog_stack_depth[i];
+
+ verbose(env, "%d", depth);
+ if (i + 1 < env->subprog_cnt + 1)
+ verbose(env, "+");
+ }
+ verbose(env, "\n");
+ env->prog->aux->stack_depth = env->subprog_stack_depth[0];
return 0;
}
@@ -4155,6 +4900,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
return -EINVAL;
}
}
+
+ if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
+ !bpf_offload_dev_match(prog, map)) {
+ verbose(env, "offload device mismatch between prog and map\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -4252,6 +5004,13 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
next_insn:
insn++;
i++;
+ continue;
+ }
+
+ /* Basic sanity check before we invest more work here. */
+ if (!bpf_opcode_in_insntable(insn->code)) {
+ verbose(env, "unknown opcode %02x\n", insn->code);
+ return -EINVAL;
}
}
@@ -4308,6 +5067,19 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
return 0;
}
+static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
+{
+ int i;
+
+ if (len == 1)
+ return;
+ for (i = 0; i < env->subprog_cnt; i++) {
+ if (env->subprog_starts[i] < off)
+ continue;
+ env->subprog_starts[i] += len - 1;
+ }
+}
+
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
const struct bpf_insn *patch, u32 len)
{
@@ -4318,17 +5090,25 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
return NULL;
if (adjust_insn_aux_data(env, new_prog->len, off, len))
return NULL;
+ adjust_subprog_starts(env, off, len);
return new_prog;
}
-/* The verifier does more data flow analysis than llvm and will not explore
- * branches that are dead at run time. Malicious programs can have dead code
- * too. Therefore replace all dead at-run-time code with nops.
+/* The verifier does more data flow analysis than llvm and will not
+ * explore branches that are dead at run time. Malicious programs can
+ * have dead code too. Therefore replace all dead at-run-time code
+ * with 'ja -1'.
+ *
+ * Just nops are not optimal: if they sat at the end of the program and
+ * through another bug we managed to jump there, we'd execute beyond
+ * program memory. Returning an exception code also wouldn't work since
+ * we can have subprogs where the dead code could be located.
*/
static void sanitize_dead_code(struct bpf_verifier_env *env)
{
struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
- struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+ struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
struct bpf_insn *insn = env->prog->insnsi;
const int insn_cnt = env->prog->len;
int i;
@@ -4336,7 +5116,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
for (i = 0; i < insn_cnt; i++) {
if (aux_data[i].seen)
continue;
- memcpy(insn + i, &nop, sizeof(nop));
+ memcpy(insn + i, &trap, sizeof(trap));
}
}
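
A small sketch, not part of this diff: the 'trap' written over dead insns is a BPF_JA whose off of -1 makes it target itself, so even if control ever reached sanitized dead code it could not run past the end of the program or into another subprog.

	/* same encoding as the 'trap' above; needs include/linux/filter.h */
	static const struct bpf_insn dead_code_filler =
		BPF_JMP_IMM(BPF_JA, 0, 0, -1);
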
@@ -4452,6 +5232,180 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
return 0;
}
+static int jit_subprogs(struct bpf_verifier_env *env)
+{
+ struct bpf_prog *prog = env->prog, **func, *tmp;
+ int i, j, subprog_start, subprog_end = 0, len, subprog;
+ struct bpf_insn *insn;
+ void *old_bpf_func;
+ int err = -ENOMEM;
+
+ if (env->subprog_cnt == 0)
+ return 0;
+
+ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+ if (insn->code != (BPF_JMP | BPF_CALL) ||
+ insn->src_reg != BPF_PSEUDO_CALL)
+ continue;
+ subprog = find_subprog(env, i + insn->imm + 1);
+ if (subprog < 0) {
+ WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+ i + insn->imm + 1);
+ return -EFAULT;
+ }
+ /* temporarily remember subprog id inside insn instead of
+ * aux_data, since next loop will split up all insns into funcs
+ */
+ insn->off = subprog + 1;
+ /* remember original imm in case JIT fails and fallback
+ * to interpreter will be needed
+ */
+ env->insn_aux_data[i].call_imm = insn->imm;
+ /* point imm to __bpf_call_base+1 from the JIT's point of view */
+ insn->imm = 1;
+ }
+
+ func = kzalloc(sizeof(prog) * (env->subprog_cnt + 1), GFP_KERNEL);
+ if (!func)
+ return -ENOMEM;
+
+ for (i = 0; i <= env->subprog_cnt; i++) {
+ subprog_start = subprog_end;
+ if (env->subprog_cnt == i)
+ subprog_end = prog->len;
+ else
+ subprog_end = env->subprog_starts[i];
+
+ len = subprog_end - subprog_start;
+ func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
+ if (!func[i])
+ goto out_free;
+ memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
+ len * sizeof(struct bpf_insn));
+ func[i]->type = prog->type;
+ func[i]->len = len;
+ if (bpf_prog_calc_tag(func[i]))
+ goto out_free;
+ func[i]->is_func = 1;
+ /* Use bpf_prog_F_tag to indicate functions in stack traces.
+ * Long term would need debug info to populate names
+ */
+ func[i]->aux->name[0] = 'F';
+ func[i]->aux->stack_depth = env->subprog_stack_depth[i];
+ func[i]->jit_requested = 1;
+ func[i] = bpf_int_jit_compile(func[i]);
+ if (!func[i]->jited) {
+ err = -ENOTSUPP;
+ goto out_free;
+ }
+ cond_resched();
+ }
+ /* at this point all bpf functions were successfully JITed;
+ * now populate all bpf_calls with correct addresses and
+ * run last pass of JIT
+ */
+ for (i = 0; i <= env->subprog_cnt; i++) {
+ insn = func[i]->insnsi;
+ for (j = 0; j < func[i]->len; j++, insn++) {
+ if (insn->code != (BPF_JMP | BPF_CALL) ||
+ insn->src_reg != BPF_PSEUDO_CALL)
+ continue;
+ subprog = insn->off;
+ insn->off = 0;
+ insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
+ func[subprog]->bpf_func -
+ __bpf_call_base;
+ }
+ }
+ for (i = 0; i <= env->subprog_cnt; i++) {
+ old_bpf_func = func[i]->bpf_func;
+ tmp = bpf_int_jit_compile(func[i]);
+ if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
+ verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
+ err = -EFAULT;
+ goto out_free;
+ }
+ cond_resched();
+ }
+
+ /* finally lock prog and jit images for all functions and
+ * populate kallsyms
+ */
+ for (i = 0; i <= env->subprog_cnt; i++) {
+ bpf_prog_lock_ro(func[i]);
+ bpf_prog_kallsyms_add(func[i]);
+ }
+
+ /* Last step: make the now-unused interpreter insns from the main
+ * prog consistent for later dump requests, so they look the same
+ * as if they had only been interpreted.
+ */
+ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+ unsigned long addr;
+
+ if (insn->code != (BPF_JMP | BPF_CALL) ||
+ insn->src_reg != BPF_PSEUDO_CALL)
+ continue;
+ insn->off = env->insn_aux_data[i].call_imm;
+ subprog = find_subprog(env, i + insn->off + 1);
+ addr = (unsigned long)func[subprog + 1]->bpf_func;
+ addr &= PAGE_MASK;
+ insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
+ addr - __bpf_call_base;
+ }
+
+ prog->jited = 1;
+ prog->bpf_func = func[0]->bpf_func;
+ prog->aux->func = func;
+ prog->aux->func_cnt = env->subprog_cnt + 1;
+ return 0;
+out_free:
+ for (i = 0; i <= env->subprog_cnt; i++)
+ if (func[i])
+ bpf_jit_free(func[i]);
+ kfree(func);
+ /* cleanup main prog to be interpreted */
+ prog->jit_requested = 0;
+ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+ if (insn->code != (BPF_JMP | BPF_CALL) ||
+ insn->src_reg != BPF_PSEUDO_CALL)
+ continue;
+ insn->off = 0;
+ insn->imm = env->insn_aux_data[i].call_imm;
+ }
+ return err;
+}
+
+static int fixup_call_args(struct bpf_verifier_env *env)
+{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ struct bpf_prog *prog = env->prog;
+ struct bpf_insn *insn = prog->insnsi;
+ int i, depth;
+#endif
+ int err;
+
+ err = 0;
+ if (env->prog->jit_requested) {
+ err = jit_subprogs(env);
+ if (err == 0)
+ return 0;
+ }
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ for (i = 0; i < prog->len; i++, insn++) {
+ if (insn->code != (BPF_JMP | BPF_CALL) ||
+ insn->src_reg != BPF_PSEUDO_CALL)
+ continue;
+ depth = get_callee_stack_depth(env, insn, i);
+ if (depth < 0)
+ return depth;
+ bpf_patch_call_args(insn, depth);
+ }
+ err = 0;
+#endif
+ return err;
+}
+
/* fixup insn->imm field of bpf_call instructions
* and inline eligible helpers as explicit sequence of BPF instructions
*
@@ -4469,15 +5423,37 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
int i, cnt, delta = 0;
for (i = 0; i < insn_cnt; i++, insn++) {
- if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
+ if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
+ insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
+ insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
- /* due to JIT bugs clear upper 32-bits of src register
- * before div/mod operation
- */
- insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
- insn_buf[1] = *insn;
- cnt = 2;
- new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+ bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
+ struct bpf_insn mask_and_div[] = {
+ BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+ /* Rx div 0 -> 0 */
+ BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
+ BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ *insn,
+ };
+ struct bpf_insn mask_and_mod[] = {
+ BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+ /* Rx mod 0 -> Rx */
+ BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
+ *insn,
+ };
+ struct bpf_insn *patchlet;
+
+ if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
+ insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+ patchlet = mask_and_div + (is64 ? 1 : 0);
+ cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
+ } else {
+ patchlet = mask_and_mod + (is64 ? 1 : 0);
+ cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
+ }
+
+ new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
if (!new_prog)
return -ENOMEM;
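
For reference, a sketch that is not part of this diff: the runtime behaviour the mask_and_div/mask_and_mod patchlets give BPF_DIV/BPF_MOD with a register divisor, shown for the BPF_ALU64 case. The 32-bit BPF_ALU variants additionally clear the upper half of the source register first via the leading BPF_MOV32_REG.

	static u64 alu64_div_reg(u64 dst, u64 src)
	{
		return src ? dst / src : 0;	/* Rx div 0 -> 0 */
	}

	static u64 alu64_mod_reg(u64 dst, u64 src)
	{
		return src ? dst % src : dst;	/* Rx mod 0 -> Rx, dst unchanged */
	}
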
@@ -4489,11 +5465,15 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
if (insn->code != (BPF_JMP | BPF_CALL))
continue;
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ continue;
if (insn->imm == BPF_FUNC_get_route_realm)
prog->dst_needed = 1;
if (insn->imm == BPF_FUNC_get_prandom_u32)
bpf_user_rnd_init_once();
+ if (insn->imm == BPF_FUNC_override_return)
+ prog->kprobe_override = 1;
if (insn->imm == BPF_FUNC_tail_call) {
/* If we tail call into other programs, we
* cannot make any assumptions since they can
@@ -4545,7 +5525,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
* handlers are currently limited to 64 bit only.
*/
- if (ebpf_jit_enabled() && BITS_PER_LONG == 64 &&
+ if (prog->jit_requested && BITS_PER_LONG == 64 &&
insn->imm == BPF_FUNC_map_lookup_elem) {
map_ptr = env->insn_aux_data[i + delta].map_ptr;
if (map_ptr == BPF_MAP_PTR_POISON ||
@@ -4593,7 +5573,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
insn = new_prog->insnsi + i + delta;
}
patch_call_imm:
- fn = env->ops->get_func_proto(insn->imm);
+ fn = env->ops->get_func_proto(insn->imm, env->prog);
/* all functions that have prototype and verifier allowed
* programs to call them, must be real in-kernel functions
*/
@@ -4635,7 +5615,7 @@ static void free_states(struct bpf_verifier_env *env)
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
struct bpf_verifier_env *env;
- struct bpf_verifer_log *log;
+ struct bpf_verifier_log *log;
int ret = -EINVAL;
/* no program is valid */
@@ -4680,7 +5660,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
env->strict_alignment = true;
- if (env->prog->aux->offload) {
+ if (bpf_prog_is_dev_bound(env->prog->aux)) {
ret = bpf_prog_offload_verifier_prep(env);
if (ret)
goto err_unlock;
@@ -4697,12 +5677,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
if (!env->explored_states)
goto skip_full_check;
+ env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
+
ret = check_cfg(env);
if (ret < 0)
goto skip_full_check;
- env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
-
ret = do_check(env);
if (env->cur_state) {
free_verifier_state(env->cur_state, true);
@@ -4717,12 +5697,18 @@ skip_full_check:
sanitize_dead_code(env);
if (ret == 0)
+ ret = check_max_stack_depth(env);
+
+ if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);
if (ret == 0)
ret = fixup_bpf_calls(env);
+ if (ret == 0)
+ ret = fixup_call_args(env);
+
if (log->level && bpf_verifier_log_full(log))
ret = -ENOSPC;
if (log->level && !log->ubuf) {