author    Linus Torvalds <torvalds@linux-foundation.org>    2020-06-03 16:27:18 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2020-06-03 16:27:18 -0700
commit    cb8e59cc87201af93dfbb6c3dccc8fcad72a09c2 (patch)
tree      a334db9022f89654b777bbce8c4c6632e65b9031  /kernel/bpf/verifier.c
parent    Merge branch 'uaccess.comedi' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs (diff)
parent    selftests: net: ip_defrag: ignore EPERM (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from David Miller:

 1) Allow setting bluetooth L2CAP modes via socket option, from Luiz Augusto von Dentz.
 2) Add GSO partial support to igc, from Sasha Neftin.
 3) Several cleanups and improvements to r8169 from Heiner Kallweit.
 4) Add IF_OPER_TESTING link state and use it when ethtool triggers a device self-test. From Andrew Lunn.
 5) Start moving away from custom driver versions, use the globally defined kernel version instead, from Leon Romanovsky.
 6) Support GRO via gro_cells in the DSA layer, from Alexander Lobakin.
 7) Allow hard IRQ deferral during NAPI, from Eric Dumazet.
 8) Add sriov and vf support to hinic, from Luo bin.
 9) Support Media Redundancy Protocol (MRP) in the bridging code, from Horatiu Vultur.
10) Support netmap in the nft_nat code, from Pablo Neira Ayuso.
11) Allow UDPv6 encapsulation of ESP in the ipsec code, from Sabrina Dubroca. Also add ipv6 support for espintcp.
12) Lots of ReST conversions of the networking documentation, from Mauro Carvalho Chehab.
13) Support configuration of ethtool rxnfc flows in the bcmgenet driver, from Doug Berger.
14) Allow dumping the cgroup id and filtering by it in the inet_diag code, from Dmitry Yakunin.
15) Add infrastructure to export netlink attribute policies to userspace, from Johannes Berg.
16) Several optimizations to the sch_fq scheduler, from Eric Dumazet.
17) Fall back to the default qdisc if qdisc init fails, because otherwise a packet scheduler init failure will make a device inoperative. From Jesper Dangaard Brouer.
18) Several RISCV bpf jit optimizations, from Luke Nelson.
19) Correct the return type of the ->ndo_start_xmit() method in several drivers; it's netdev_tx_t but many drivers were using 'int'. From Yunjian Wang.
20) Add an ethtool interface for PHY master/slave config, from Oleksij Rempel.
21) Add BPF iterators, from Yonghong Song.
22) Add cable test infrastructure, including ethtool interfaces, from Andrew Lunn. Marvell PHY driver is the first to support this facility.
23) Remove zero-length arrays all over, from Gustavo A. R. Silva.
24) Calculate and maintain an explicit frame size in XDP, from Jesper Dangaard Brouer.
25) Add CAP_BPF, from Alexei Starovoitov.
26) Support terse dumps in the packet scheduler, from Vlad Buslov.
27) Support XDP_TX bulking in the dpaa2 driver, from Ioana Ciornei.
28) Add devm_register_netdev(), from Bartosz Golaszewski.
29) Minimize qdisc resets, from Cong Wang.
30) Get rid of kernel_getsockopt and kernel_setsockopt in order to eliminate set_fs/get_fs calls. From Christoph Hellwig.
* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2517 commits)
  selftests: net: ip_defrag: ignore EPERM
  net_failover: fixed rollback in net_failover_open()
  Revert "tipc: Fix potential tipc_aead refcnt leak in tipc_crypto_rcv"
  Revert "tipc: Fix potential tipc_node refcnt leak in tipc_rcv"
  vmxnet3: allow rx flow hash ops only when rss is enabled
  hinic: add set_channels ethtool_ops support
  selftests/bpf: Add a default $(CXX) value
  tools/bpf: Don't use $(COMPILE.c)
  bpf, selftests: Use bpf_probe_read_kernel
  s390/bpf: Use bcr 0,%0 as tail call nop filler
  s390/bpf: Maintain 8-byte stack alignment
  selftests/bpf: Fix verifier test
  selftests/bpf: Fix sample_cnt shared between two threads
  bpf, selftests: Adapt cls_redirect to call csum_level helper
  bpf: Add csum_level helper for fixing up csum levels
  bpf: Fix up bpf_skb_adjust_room helper's skb csum setting
  sfc: add missing annotation for efx_ef10_try_update_nic_stats_vf()
  crypto/chtls: IPv6 support for inline TLS
  Crypto/chcr: Fixes a coccinile check error
  Crypto/chcr: Fixes compilations warnings
  ...
Diffstat (limited to 'kernel/bpf/verifier.c')
-rw-r--r--  kernel/bpf/verifier.c  |  387
1 file changed, 299 insertions(+), 88 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index efe14cf24bc6..5c7bbaac81ef 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -28,9 +28,11 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
+#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
+#undef BPF_LINK_TYPE
};
/* bpf_check() is a static code analyzer that walks eBPF program
@@ -168,6 +170,8 @@ struct bpf_verifier_stack_elem {
int insn_idx;
int prev_insn_idx;
struct bpf_verifier_stack_elem *next;
+ /* length of verifier log at the time this state was pushed on stack */
+ u32 log_pos;
};
#define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192
@@ -229,6 +233,7 @@ struct bpf_call_arg_meta {
bool pkt_access;
int regno;
int access_size;
+ int mem_size;
u64 msize_max_value;
int ref_obj_id;
int func_id;
@@ -283,6 +288,18 @@ void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
log->ubuf = NULL;
}
+static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
+{
+ char zero = 0;
+
+ if (!bpf_verifier_log_needed(log))
+ return;
+
+ log->len_used = new_pos;
+ if (put_user(zero, log->ubuf + new_pos))
+ log->ubuf = NULL;
+}
+
/* log_level controls verbosity level of eBPF verifier.
* bpf_verifier_log_write() is used to dump the verification trace to the log,
* so the user can figure out what's wrong with the program
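The bpf_vlog_reset() helper added above pairs with the log_pos snapshot recorded in each bpf_verifier_stack_elem (see the struct change earlier): when a branch that verified cleanly is popped, the user-visible log is rewound to where that branch started, so a level-1 log only describes the path that actually mattered. Requesting log_level 2 keeps the full trace. A minimal userspace sketch using the raw bpf(2) syscall; the program type, buffer size, and instruction array are placeholders:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static char log_buf[1 << 20];

/* Ask the verifier for the full, unrewound trace. With log_level 1 the
 * new pop_log/bpf_vlog_reset logic discards output from branches that
 * verified fine and were popped off the stack.
 */
int load_with_full_trace(const struct bpf_insn *insns, __u32 insn_cnt)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)"GPL";
	attr.log_buf = (__u64)(unsigned long)log_buf;
	attr.log_size = sizeof(log_buf);
	attr.log_level = 2;	/* BPF_LOG_LEVEL2: keep per-branch output */

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}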
@@ -377,12 +394,23 @@ static bool type_is_sk_pointer(enum bpf_reg_type type)
type == PTR_TO_XDP_SOCK;
}
+static bool reg_type_not_null(enum bpf_reg_type type)
+{
+ return type == PTR_TO_SOCKET ||
+ type == PTR_TO_TCP_SOCK ||
+ type == PTR_TO_MAP_VALUE ||
+ type == PTR_TO_SOCK_COMMON ||
+ type == PTR_TO_BTF_ID;
+}
+
static bool reg_type_may_be_null(enum bpf_reg_type type)
{
return type == PTR_TO_MAP_VALUE_OR_NULL ||
type == PTR_TO_SOCKET_OR_NULL ||
type == PTR_TO_SOCK_COMMON_OR_NULL ||
- type == PTR_TO_TCP_SOCK_OR_NULL;
+ type == PTR_TO_TCP_SOCK_OR_NULL ||
+ type == PTR_TO_BTF_ID_OR_NULL ||
+ type == PTR_TO_MEM_OR_NULL;
}
static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
@@ -396,7 +424,9 @@ static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
return type == PTR_TO_SOCKET ||
type == PTR_TO_SOCKET_OR_NULL ||
type == PTR_TO_TCP_SOCK ||
- type == PTR_TO_TCP_SOCK_OR_NULL;
+ type == PTR_TO_TCP_SOCK_OR_NULL ||
+ type == PTR_TO_MEM ||
+ type == PTR_TO_MEM_OR_NULL;
}
static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
@@ -410,14 +440,37 @@ static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
*/
static bool is_release_function(enum bpf_func_id func_id)
{
- return func_id == BPF_FUNC_sk_release;
+ return func_id == BPF_FUNC_sk_release ||
+ func_id == BPF_FUNC_ringbuf_submit ||
+ func_id == BPF_FUNC_ringbuf_discard;
}
-static bool is_acquire_function(enum bpf_func_id func_id)
+static bool may_be_acquire_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_sk_lookup_tcp ||
func_id == BPF_FUNC_sk_lookup_udp ||
- func_id == BPF_FUNC_skc_lookup_tcp;
+ func_id == BPF_FUNC_skc_lookup_tcp ||
+ func_id == BPF_FUNC_map_lookup_elem ||
+ func_id == BPF_FUNC_ringbuf_reserve;
+}
+
+static bool is_acquire_function(enum bpf_func_id func_id,
+ const struct bpf_map *map)
+{
+ enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;
+
+ if (func_id == BPF_FUNC_sk_lookup_tcp ||
+ func_id == BPF_FUNC_sk_lookup_udp ||
+ func_id == BPF_FUNC_skc_lookup_tcp ||
+ func_id == BPF_FUNC_ringbuf_reserve)
+ return true;
+
+ if (func_id == BPF_FUNC_map_lookup_elem &&
+ (map_type == BPF_MAP_TYPE_SOCKMAP ||
+ map_type == BPF_MAP_TYPE_SOCKHASH))
+ return true;
+
+ return false;
}
static bool is_ptr_cast_function(enum bpf_func_id func_id)
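These acquire/release hooks are what let the verifier enforce balanced ringbuf usage: bpf_ringbuf_reserve() hands back a referenced pointer (typed PTR_TO_MEM_OR_NULL further down), and every program path must end that reference with bpf_ringbuf_submit() or bpf_ringbuf_discard(). A minimal sketch of a program these checks accept; the map name, sample layout, and tracepoint are illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);	/* power of 2, multiple of page size */
} rb SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_execve")
int trace_execve(void *ctx)
{
	__u64 *ev;

	/* Acquire: ev gets a ref_obj_id and type PTR_TO_MEM_OR_NULL. */
	ev = bpf_ringbuf_reserve(&rb, sizeof(*ev), 0);
	if (!ev)
		return 0;	/* NULL branch carries no reference */

	*ev = bpf_ktime_get_ns();

	/* Release: leaving this (or bpf_ringbuf_discard) out makes the
	 * load fail with an unreleased-reference error.
	 */
	bpf_ringbuf_submit(ev, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";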
@@ -448,6 +501,9 @@ static const char * const reg_type_str[] = {
[PTR_TO_TP_BUFFER] = "tp_buffer",
[PTR_TO_XDP_SOCK] = "xdp_sock",
[PTR_TO_BTF_ID] = "ptr_",
+ [PTR_TO_BTF_ID_OR_NULL] = "ptr_or_null_",
+ [PTR_TO_MEM] = "mem",
+ [PTR_TO_MEM_OR_NULL] = "mem_or_null",
};
static char slot_type_char[] = {
@@ -508,7 +564,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
/* reg->off should be 0 for SCALAR_VALUE */
verbose(env, "%lld", reg->var_off.value + reg->off);
} else {
- if (t == PTR_TO_BTF_ID)
+ if (t == PTR_TO_BTF_ID || t == PTR_TO_BTF_ID_OR_NULL)
verbose(env, "%s", kernel_type_name(reg->btf_id));
verbose(env, "(id=%d", reg->id);
if (reg_type_may_be_refcounted_or_null(t))
@@ -846,7 +902,7 @@ static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifi
}
static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
- int *insn_idx)
+ int *insn_idx, bool pop_log)
{
struct bpf_verifier_state *cur = env->cur_state;
struct bpf_verifier_stack_elem *elem, *head = env->head;
@@ -860,6 +916,8 @@ static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
if (err)
return err;
}
+ if (pop_log)
+ bpf_vlog_reset(&env->log, head->log_pos);
if (insn_idx)
*insn_idx = head->insn_idx;
if (prev_insn_idx)
@@ -887,6 +945,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
elem->insn_idx = insn_idx;
elem->prev_insn_idx = prev_insn_idx;
elem->next = env->head;
+ elem->log_pos = env->log.len_used;
env->head = elem;
env->stack_size++;
err = copy_verifier_state(&elem->st, cur);
@@ -915,7 +974,7 @@ err:
free_verifier_state(env->cur_state, true);
env->cur_state = NULL;
/* pop all elements and return */
- while (!pop_stack(env, NULL, NULL));
+ while (!pop_stack(env, NULL, NULL, false));
return NULL;
}
@@ -1255,7 +1314,7 @@ static void __mark_reg_unknown(const struct bpf_verifier_env *env,
reg->type = SCALAR_VALUE;
reg->var_off = tnum_unknown;
reg->frameno = 0;
- reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks;
+ reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
__mark_reg_unbounded(reg);
}
@@ -1387,8 +1446,9 @@ static int check_subprogs(struct bpf_verifier_env *env)
continue;
if (insn[i].src_reg != BPF_PSEUDO_CALL)
continue;
- if (!env->allow_ptr_leaks) {
- verbose(env, "function calls to other bpf functions are allowed for root only\n");
+ if (!env->bpf_capable) {
+ verbose(env,
+ "function calls to other bpf functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
return -EPERM;
}
ret = add_subprog(env, i + insn[i].imm + 1);
@@ -1922,8 +1982,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
bool new_marks = false;
int i, err;
- if (!env->allow_ptr_leaks)
- /* backtracking is root only for now */
+ if (!env->bpf_capable)
return 0;
func = st->frame[st->curframe];
@@ -2101,6 +2160,7 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
case PTR_TO_TCP_SOCK_OR_NULL:
case PTR_TO_XDP_SOCK:
case PTR_TO_BTF_ID:
+ case PTR_TO_BTF_ID_OR_NULL:
return true;
default:
return false;
@@ -2170,7 +2230,7 @@ static int check_stack_write(struct bpf_verifier_env *env,
reg = &cur->regs[value_regno];
if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
- !register_is_null(reg) && env->allow_ptr_leaks) {
+ !register_is_null(reg) && env->bpf_capable) {
if (dst_reg != BPF_REG_FP) {
/* The backtracking logic can only recognize explicit
* stack slot address like [fp - 8]. Other spill of
@@ -2196,7 +2256,7 @@ static int check_stack_write(struct bpf_verifier_env *env,
return -EINVAL;
}
- if (!env->allow_ptr_leaks) {
+ if (!env->bypass_spec_v4) {
bool sanitize = false;
if (state->stack[spi].slot_type[0] == STACK_SPILL &&
@@ -2418,32 +2478,49 @@ static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
return 0;
}
-/* check read/write into map element returned by bpf_map_lookup_elem() */
-static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
- int size, bool zero_size_allowed)
+/* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
+static int __check_mem_access(struct bpf_verifier_env *env, int regno,
+ int off, int size, u32 mem_size,
+ bool zero_size_allowed)
{
- struct bpf_reg_state *regs = cur_regs(env);
- struct bpf_map *map = regs[regno].map_ptr;
+ bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
+ struct bpf_reg_state *reg;
+
+ if (off >= 0 && size_ok && (u64)off + size <= mem_size)
+ return 0;
- if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
- off + size > map->value_size) {
+ reg = &cur_regs(env)[regno];
+ switch (reg->type) {
+ case PTR_TO_MAP_VALUE:
verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
- map->value_size, off, size);
- return -EACCES;
+ mem_size, off, size);
+ break;
+ case PTR_TO_PACKET:
+ case PTR_TO_PACKET_META:
+ case PTR_TO_PACKET_END:
+ verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
+ off, size, regno, reg->id, off, mem_size);
+ break;
+ case PTR_TO_MEM:
+ default:
+ verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
+ mem_size, off, size);
}
- return 0;
+
+ return -EACCES;
}
-/* check read/write into a map element with possible variable offset */
-static int check_map_access(struct bpf_verifier_env *env, u32 regno,
- int off, int size, bool zero_size_allowed)
+/* check read/write into a memory region with possible variable offset */
+static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
+ int off, int size, u32 mem_size,
+ bool zero_size_allowed)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *reg = &state->regs[regno];
int err;
- /* We may have adjusted the register to this map value, so we
+ /* We may have adjusted the register pointing to memory region, so we
* need to try adding each of min_value and max_value to off
* to make sure our theoretical access will be safe.
*/
@@ -2464,10 +2541,10 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
regno);
return -EACCES;
}
- err = __check_map_access(env, regno, reg->smin_value + off, size,
- zero_size_allowed);
+ err = __check_mem_access(env, regno, reg->smin_value + off, size,
+ mem_size, zero_size_allowed);
if (err) {
- verbose(env, "R%d min value is outside of the array range\n",
+ verbose(env, "R%d min value is outside of the allowed memory range\n",
regno);
return err;
}
@@ -2477,18 +2554,38 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
* If reg->umax_value + off could overflow, treat that as unbounded too.
*/
if (reg->umax_value >= BPF_MAX_VAR_OFF) {
- verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
+ verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
regno);
return -EACCES;
}
- err = __check_map_access(env, regno, reg->umax_value + off, size,
- zero_size_allowed);
- if (err)
- verbose(env, "R%d max value is outside of the array range\n",
+ err = __check_mem_access(env, regno, reg->umax_value + off, size,
+ mem_size, zero_size_allowed);
+ if (err) {
+ verbose(env, "R%d max value is outside of the allowed memory range\n",
regno);
+ return err;
+ }
+
+ return 0;
+}
+
+/* check read/write into a map element with possible variable offset */
+static int check_map_access(struct bpf_verifier_env *env, u32 regno,
+ int off, int size, bool zero_size_allowed)
+{
+ struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
+ struct bpf_reg_state *reg = &state->regs[regno];
+ struct bpf_map *map = reg->map_ptr;
+ int err;
- if (map_value_has_spin_lock(reg->map_ptr)) {
- u32 lock = reg->map_ptr->spin_lock_off;
+ err = check_mem_region_access(env, regno, off, size, map->value_size,
+ zero_size_allowed);
+ if (err)
+ return err;
+
+ if (map_value_has_spin_lock(map)) {
+ u32 lock = map->spin_lock_off;
/* if any part of struct bpf_spin_lock can be touched by
* load/store reject this program.
@@ -2546,21 +2643,6 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
}
}
-static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
- int off, int size, bool zero_size_allowed)
-{
- struct bpf_reg_state *regs = cur_regs(env);
- struct bpf_reg_state *reg = &regs[regno];
-
- if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
- (u64)off + size > reg->range) {
- verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
- off, size, regno, reg->id, reg->off, reg->range);
- return -EACCES;
- }
- return 0;
-}
-
static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
int size, bool zero_size_allowed)
{
@@ -2581,16 +2663,17 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
regno);
return -EACCES;
}
- err = __check_packet_access(env, regno, off, size, zero_size_allowed);
+ err = __check_mem_access(env, regno, off, size, reg->range,
+ zero_size_allowed);
if (err) {
verbose(env, "R%d offset is outside of the packet\n", regno);
return err;
}
- /* __check_packet_access has made sure "off + size - 1" is within u16.
+ /* __check_mem_access has made sure "off + size - 1" is within u16.
* reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
* otherwise find_good_pkt_pointers would have refused to set range info
- * that __check_packet_access would have rejected this pkt access.
+ * that __check_mem_access would have rejected this pkt access.
* Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
*/
env->prog->aux->max_pkt_offset =
@@ -2621,7 +2704,7 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
*/
*reg_type = info.reg_type;
- if (*reg_type == PTR_TO_BTF_ID)
+ if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL)
*btf_id = info.btf_id;
else
env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
@@ -3170,6 +3253,16 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
mark_reg_unknown(env, regs, value_regno);
}
}
+ } else if (reg->type == PTR_TO_MEM) {
+ if (t == BPF_WRITE && value_regno >= 0 &&
+ is_pointer_value(env, value_regno)) {
+ verbose(env, "R%d leaks addr into mem\n", value_regno);
+ return -EACCES;
+ }
+ err = check_mem_region_access(env, regno, off, size,
+ reg->mem_size, false);
+ if (!err && t == BPF_READ && value_regno >= 0)
+ mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_CTX) {
enum bpf_reg_type reg_type = SCALAR_VALUE;
u32 btf_id = 0;
@@ -3205,7 +3298,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
* a sub-register.
*/
regs[value_regno].subreg_def = DEF_NOT_SUBREG;
- if (reg_type == PTR_TO_BTF_ID)
+ if (reg_type == PTR_TO_BTF_ID ||
+ reg_type == PTR_TO_BTF_ID_OR_NULL)
regs[value_regno].btf_id = btf_id;
}
regs[value_regno].type = reg_type;
@@ -3390,7 +3484,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
* Spectre masking for stack ALU.
* See also retrieve_ptr_limit().
*/
- if (!env->allow_ptr_leaks) {
+ if (!env->bypass_spec_v1) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
@@ -3452,6 +3546,11 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
*stype = STACK_MISC;
goto mark;
}
+
+ if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+ state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID)
+ goto mark;
+
if (state->stack[spi].slot_type[0] == STACK_SPILL &&
state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
@@ -3501,6 +3600,10 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
return -EACCES;
return check_map_access(env, regno, reg->off, access_size,
zero_size_allowed);
+ case PTR_TO_MEM:
+ return check_mem_region_access(env, regno, reg->off,
+ access_size, reg->mem_size,
+ zero_size_allowed);
default: /* scalar_value|ptr_to_stack or invalid ptr */
return check_stack_boundary(env, regno, access_size,
zero_size_allowed, meta);
@@ -3605,6 +3708,17 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type)
type == ARG_CONST_SIZE_OR_ZERO;
}
+static bool arg_type_is_alloc_mem_ptr(enum bpf_arg_type type)
+{
+ return type == ARG_PTR_TO_ALLOC_MEM ||
+ type == ARG_PTR_TO_ALLOC_MEM_OR_NULL;
+}
+
+static bool arg_type_is_alloc_size(enum bpf_arg_type type)
+{
+ return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;
+}
+
static bool arg_type_is_int_ptr(enum bpf_arg_type type)
{
return type == ARG_PTR_TO_INT ||
@@ -3664,7 +3778,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
type != expected_type)
goto err_type;
} else if (arg_type == ARG_CONST_SIZE ||
- arg_type == ARG_CONST_SIZE_OR_ZERO) {
+ arg_type == ARG_CONST_SIZE_OR_ZERO ||
+ arg_type == ARG_CONST_ALLOC_SIZE_OR_ZERO) {
expected_type = SCALAR_VALUE;
if (type != expected_type)
goto err_type;
@@ -3735,13 +3850,29 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
* happens during stack boundary checking.
*/
if (register_is_null(reg) &&
- arg_type == ARG_PTR_TO_MEM_OR_NULL)
+ (arg_type == ARG_PTR_TO_MEM_OR_NULL ||
+ arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL))
/* final test in check_stack_boundary() */;
else if (!type_is_pkt_pointer(type) &&
type != PTR_TO_MAP_VALUE &&
+ type != PTR_TO_MEM &&
type != expected_type)
goto err_type;
meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
+ } else if (arg_type_is_alloc_mem_ptr(arg_type)) {
+ expected_type = PTR_TO_MEM;
+ if (register_is_null(reg) &&
+ arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL)
+ /* final test in check_stack_boundary() */;
+ else if (type != expected_type)
+ goto err_type;
+ if (meta->ref_obj_id) {
+ verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
+ regno, reg->ref_obj_id,
+ meta->ref_obj_id);
+ return -EFAULT;
+ }
+ meta->ref_obj_id = reg->ref_obj_id;
} else if (arg_type_is_int_ptr(arg_type)) {
expected_type = PTR_TO_STACK;
if (!type_is_pkt_pointer(type) &&
@@ -3837,6 +3968,13 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
zero_size_allowed, meta);
if (!err)
err = mark_chain_precision(env, regno);
+ } else if (arg_type_is_alloc_size(arg_type)) {
+ if (!tnum_is_const(reg->var_off)) {
+ verbose(env, "R%d unbounded size, use 'var &= const' or 'if (var < const)'\n",
+ regno);
+ return -EACCES;
+ }
+ meta->mem_size = reg->var_off.value;
} else if (arg_type_is_int_ptr(arg_type)) {
int size = int_ptr_type_to_size(arg_type);
@@ -3873,6 +4011,14 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
func_id != BPF_FUNC_xdp_output)
goto error;
break;
+ case BPF_MAP_TYPE_RINGBUF:
+ if (func_id != BPF_FUNC_ringbuf_output &&
+ func_id != BPF_FUNC_ringbuf_reserve &&
+ func_id != BPF_FUNC_ringbuf_submit &&
+ func_id != BPF_FUNC_ringbuf_discard &&
+ func_id != BPF_FUNC_ringbuf_query)
+ goto error;
+ break;
case BPF_MAP_TYPE_STACK_TRACE:
if (func_id != BPF_FUNC_get_stackid)
goto error;
@@ -3915,7 +4061,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
func_id != BPF_FUNC_sock_map_update &&
func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_map &&
- func_id != BPF_FUNC_sk_select_reuseport)
+ func_id != BPF_FUNC_sk_select_reuseport &&
+ func_id != BPF_FUNC_map_lookup_elem)
goto error;
break;
case BPF_MAP_TYPE_SOCKHASH:
@@ -3923,7 +4070,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
func_id != BPF_FUNC_sock_hash_update &&
func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_hash &&
- func_id != BPF_FUNC_sk_select_reuseport)
+ func_id != BPF_FUNC_sk_select_reuseport &&
+ func_id != BPF_FUNC_map_lookup_elem)
goto error;
break;
case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
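Allowing BPF_FUNC_map_lookup_elem on sockmap/sockhash works together with the is_acquire_function() and mark_ptr_or_null_reg() changes elsewhere in this patch: the lookup result is a referenced socket pointer that must be null-checked and then released. A sketch of the pattern, under the assumption of a verdict-program setup; the map definition and section name are illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 16);
	__type(key, __u32);
	__type(value, __u64);
} sock_map SEC(".maps");

SEC("sk_skb/stream_verdict")
int verdict(struct __sk_buff *skb)
{
	__u32 key = 0;
	struct bpf_sock *sk;

	/* Previously rejected for these map types; now returns a value
	 * that the null check below converts to a referenced PTR_TO_SOCKET.
	 */
	sk = bpf_map_lookup_elem(&sock_map, &key);
	if (!sk)
		return SK_DROP;

	/* ... inspect sk ... */

	bpf_sk_release(sk);	/* required: the lookup counted as an acquire */
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";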
@@ -4093,7 +4241,7 @@ static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
/* A reference acquiring function cannot acquire
* another refcounted ptr.
*/
- if (is_acquire_function(func_id) && count)
+ if (may_be_acquire_function(func_id) && count)
return false;
/* We only support one arg being unreferenced at the moment,
@@ -4388,10 +4536,10 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
if (!BPF_MAP_PTR(aux->map_ptr_state))
bpf_map_ptr_store(aux, meta->map_ptr,
- meta->map_ptr->unpriv_array);
+ !meta->map_ptr->bypass_spec_v1);
else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
- meta->map_ptr->unpriv_array);
+ !meta->map_ptr->bypass_spec_v1);
return 0;
}
@@ -4597,6 +4745,11 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
regs[BPF_REG_0].id = ++env->id_gen;
+ } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) {
+ mark_reg_known_zero(env, regs, BPF_REG_0);
+ regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
+ regs[BPF_REG_0].id = ++env->id_gen;
+ regs[BPF_REG_0].mem_size = meta.mem_size;
} else {
verbose(env, "unknown return type %d of func %s#%d\n",
fn->ret_type, func_id_name(func_id), func_id);
@@ -4606,7 +4759,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
if (is_ptr_cast_function(func_id)) {
/* For release_reference() */
regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
- } else if (is_acquire_function(func_id)) {
+ } else if (is_acquire_function(func_id, meta.map_ptr)) {
int id = acquire_reference_state(env, insn_idx);
if (id < 0)
@@ -4760,7 +4913,7 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
const struct bpf_insn *insn)
{
- return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
+ return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
}
static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
@@ -5070,7 +5223,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
/* For unprivileged we require that resulting offset must be in bounds
* in order to be able to sanitize access later on.
*/
- if (!env->allow_ptr_leaks) {
+ if (!env->bypass_spec_v1) {
if (dst_reg->type == PTR_TO_MAP_VALUE &&
check_map_access(env, dst, dst_reg->off, 1, false)) {
verbose(env, "R%d pointer arithmetic of map value goes out of range, "
@@ -5611,7 +5764,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
{
struct bpf_reg_state *regs = cur_regs(env);
u8 opcode = BPF_OP(insn->code);
- bool src_known, dst_known;
+ bool src_known;
s64 smin_val, smax_val;
u64 umin_val, umax_val;
s32 s32_min_val, s32_max_val;
@@ -5633,7 +5786,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
if (alu32) {
src_known = tnum_subreg_is_const(src_reg.var_off);
- dst_known = tnum_subreg_is_const(dst_reg->var_off);
if ((src_known &&
(s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
@@ -5645,7 +5797,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
}
} else {
src_known = tnum_is_const(src_reg.var_off);
- dst_known = tnum_is_const(dst_reg->var_off);
if ((src_known &&
(smin_val != smax_val || umin_val != umax_val)) ||
smin_val > smax_val || umin_val > umax_val) {
@@ -6261,8 +6412,25 @@ static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
bool is_jmp32)
{
- if (__is_pointer_value(false, reg))
- return -1;
+ if (__is_pointer_value(false, reg)) {
+ if (!reg_type_not_null(reg->type))
+ return -1;
+
+ /* If pointer is valid tests against zero will fail so we can
+ * use this to direct branch taken.
+ */
+ if (val != 0)
+ return -1;
+
+ switch (opcode) {
+ case BPF_JEQ:
+ return 0;
+ case BPF_JNE:
+ return 1;
+ default:
+ return -1;
+ }
+ }
if (is_jmp32)
return is_branch32_taken(reg, val, opcode);
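reg_type_not_null() feeds this new special case: when a pointer already proven valid (for example PTR_TO_SOCK_COMMON after a null check) is compared against zero, the verifier resolves the branch statically instead of exploring both sides. A sketch of the shape this prunes; the section name is illustrative and a compiler may well fold the second test on its own:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/ingress")
int prune_demo(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;	/* PTR_TO_SOCK_COMMON_OR_NULL */

	if (!sk)
		return 1;	/* allow; from here sk is PTR_TO_SOCK_COMMON */

	/* Always true: is_branch_taken() now returns 1 for this JNE
	 * against 0, so the dead else-path is never verified.
	 */
	if (sk != (struct bpf_sock *)0)
		bpf_printk("socket present");

	return 1;
}

char _license[] SEC("license") = "GPL";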
@@ -6517,12 +6685,16 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
if (is_null) {
reg->type = SCALAR_VALUE;
} else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
- if (reg->map_ptr->inner_map_meta) {
+ const struct bpf_map *map = reg->map_ptr;
+
+ if (map->inner_map_meta) {
reg->type = CONST_PTR_TO_MAP;
- reg->map_ptr = reg->map_ptr->inner_map_meta;
- } else if (reg->map_ptr->map_type ==
- BPF_MAP_TYPE_XSKMAP) {
+ reg->map_ptr = map->inner_map_meta;
+ } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
reg->type = PTR_TO_XDP_SOCK;
+ } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
+ map->map_type == BPF_MAP_TYPE_SOCKHASH) {
+ reg->type = PTR_TO_SOCKET;
} else {
reg->type = PTR_TO_MAP_VALUE;
}
@@ -6532,6 +6704,10 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
reg->type = PTR_TO_SOCK_COMMON;
} else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
reg->type = PTR_TO_TCP_SOCK;
+ } else if (reg->type == PTR_TO_BTF_ID_OR_NULL) {
+ reg->type = PTR_TO_BTF_ID;
+ } else if (reg->type == PTR_TO_MEM_OR_NULL) {
+ reg->type = PTR_TO_MEM;
}
if (is_null) {
/* We don't need id and ref_obj_id from this point
@@ -6755,7 +6931,11 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
}
if (pred >= 0) {
- err = mark_chain_precision(env, insn->dst_reg);
+ /* If we get here with a dst_reg pointer type it is because
+ * above is_branch_taken() special cased the 0 comparison.
+ */
+ if (!__is_pointer_value(false, dst_reg))
+ err = mark_chain_precision(env, insn->dst_reg);
if (BPF_SRC(insn->code) == BPF_X && !err)
err = mark_chain_precision(env, insn->src_reg);
if (err)
@@ -7041,7 +7221,11 @@ static int check_return_code(struct bpf_verifier_env *env)
switch (env->prog->type) {
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
- env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG)
+ env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
+ env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
+ env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
+ env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
+ env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
range = tnum_range(1, 1);
break;
case BPF_PROG_TYPE_CGROUP_SKB:
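The four new attach types extend the "must return 1" rule that already covered the UDP recvmsg hooks: get{peer,sock}name programs may rewrite the address reported to userspace but cannot veto the call, so check_return_code() pins R0 to exactly 1. A sketch; the rewritten address is illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/getpeername4")
int rewrite_peer(struct bpf_sock_addr *ctx)
{
	/* Report 127.0.0.1 to the caller instead of the real peer. */
	ctx->user_ip4 = bpf_htonl(0x7f000001);
	return 1;	/* tnum_range(1, 1): any other value fails verification */
}

char _license[] SEC("license") = "GPL";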
@@ -7070,6 +7254,8 @@ static int check_return_code(struct bpf_verifier_env *env)
case BPF_TRACE_RAW_TP:
case BPF_MODIFY_RETURN:
return 0;
+ case BPF_TRACE_ITER:
+ break;
default:
return -ENOTSUPP;
}
@@ -7206,7 +7392,7 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
insn_stack[env->cfg.cur_stack++] = w;
return 1;
} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
- if (loop_ok && env->allow_ptr_leaks)
+ if (loop_ok && env->bpf_capable)
return 0;
verbose_linfo(env, t, "%d: ", t);
verbose_linfo(env, w, "%d: ", w);
@@ -8315,7 +8501,7 @@ next:
if (env->max_states_per_insn < states_cnt)
env->max_states_per_insn = states_cnt;
- if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
+ if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
return push_jmp_history(env, cur);
if (!add_new_state)
@@ -8402,6 +8588,7 @@ static bool reg_type_mismatch_ok(enum bpf_reg_type type)
case PTR_TO_TCP_SOCK_OR_NULL:
case PTR_TO_XDP_SOCK:
case PTR_TO_BTF_ID:
+ case PTR_TO_BTF_ID_OR_NULL:
return false;
default:
return true;
@@ -8428,6 +8615,7 @@ static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
static int do_check(struct bpf_verifier_env *env)
{
+ bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
struct bpf_verifier_state *state = env->cur_state;
struct bpf_insn *insns = env->prog->insnsi;
struct bpf_reg_state *regs;
@@ -8704,7 +8892,7 @@ static int do_check(struct bpf_verifier_env *env)
process_bpf_exit:
update_branch_counts(env, env->cur_state);
err = pop_stack(env, &prev_insn_idx,
- &env->insn_idx);
+ &env->insn_idx, pop_log);
if (err < 0) {
if (err != -ENOENT)
return err;
@@ -9974,7 +10162,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
insn->code = BPF_JMP | BPF_TAIL_CALL;
aux = &env->insn_aux_data[i + delta];
- if (env->allow_ptr_leaks && !expect_blinding &&
+ if (env->bpf_capable && !expect_blinding &&
prog->jit_requested &&
!bpf_map_key_poisoned(aux) &&
!bpf_map_ptr_poisoned(aux) &&
@@ -10227,6 +10415,7 @@ static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
static int do_check_common(struct bpf_verifier_env *env, int subprog)
{
+ bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
struct bpf_verifier_state *state;
struct bpf_reg_state *regs;
int ret, i;
@@ -10289,7 +10478,9 @@ out:
free_verifier_state(env->cur_state, true);
env->cur_state = NULL;
}
- while (!pop_stack(env, NULL, NULL));
+ while (!pop_stack(env, NULL, NULL, false));
+ if (!ret && pop_log)
+ bpf_vlog_reset(&env->log, 0);
free_states(env);
if (ret)
/* clean aux data in case subprog was rejected */
@@ -10445,6 +10636,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
struct bpf_prog *tgt_prog = prog->aux->linked_prog;
u32 btf_id = prog->aux->attach_btf_id;
const char prefix[] = "btf_trace_";
+ struct btf_func_model fmodel;
int ret = 0, subprog = -1, i;
struct bpf_trampoline *tr;
const struct btf_type *t;
@@ -10586,6 +10778,22 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
prog->aux->attach_func_proto = t;
prog->aux->attach_btf_trace = true;
return 0;
+ case BPF_TRACE_ITER:
+ if (!btf_type_is_func(t)) {
+ verbose(env, "attach_btf_id %u is not a function\n",
+ btf_id);
+ return -EINVAL;
+ }
+ t = btf_type_by_id(btf, t->type);
+ if (!btf_type_is_func_proto(t))
+ return -EINVAL;
+ prog->aux->attach_func_name = tname;
+ prog->aux->attach_func_proto = t;
+ if (!bpf_iter_prog_supported(prog))
+ return -EINVAL;
+ ret = btf_distill_func_proto(&env->log, btf, t,
+ tname, &fmodel);
+ return ret;
default:
if (!prog_extension)
return -EINVAL;
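This hunk is what lets iterator programs load: the attach target is a BTF-described function, and the program's context pointers (here ctx->task) arrive as the new PTR_TO_BTF_ID_OR_NULL type, forcing a null check before any dereference. A sketch of a task iterator; the struct shapes are hand-declared for illustration (real programs take them from vmlinux BTF) and bpf_seq_printf() is the helper added alongside this series:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct seq_file;
struct task_struct {
	int pid;
} __attribute__((preserve_access_index));

struct bpf_iter_meta {
	struct seq_file *seq;
	__u64 session_id;
	__u64 seq_num;
} __attribute__((preserve_access_index));

struct bpf_iter__task {
	struct bpf_iter_meta *meta;
	struct task_struct *task;
} __attribute__((preserve_access_index));

SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{
	static const char fmt[] = "pid=%d\n";
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;	/* PTR_TO_BTF_ID_OR_NULL */
	int pid;

	if (!task)
		return 0;	/* NULL task marks the end of the iteration */

	pid = task->pid;
	bpf_seq_printf(seq, fmt, sizeof(fmt), &pid, sizeof(pid));
	return 0;
}

char _license[] SEC("license") = "GPL";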
@@ -10696,7 +10904,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
env->insn_aux_data[i].orig_idx = i;
env->prog = *prog;
env->ops = bpf_verifier_ops[env->prog->type];
- is_priv = capable(CAP_SYS_ADMIN);
+ is_priv = bpf_capable();
if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
mutex_lock(&bpf_verifier_lock);
@@ -10737,7 +10945,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
env->strict_alignment = false;
- env->allow_ptr_leaks = is_priv;
+ env->allow_ptr_leaks = bpf_allow_ptr_leaks();
+ env->bypass_spec_v1 = bpf_bypass_spec_v1();
+ env->bypass_spec_v4 = bpf_bypass_spec_v4();
+ env->bpf_capable = bpf_capable();
if (is_priv)
env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;