From 1b688a19a92223cf2d1892c9d05d64dc397b33e3 Mon Sep 17 00:00:00 2001
From: Edward Cree
Date: Wed, 23 Aug 2017 15:10:50 +0100
Subject: bpf/verifier: remove varlen_map_value_access flag

The optimisation it does is broken when the 'new' register value has a
variable offset and the 'old' was constant.  I broke it with my pointer
types unification (see Fixes tag below), before which the 'new' value
would have type PTR_TO_MAP_VALUE_ADJ and would thus not compare equal;
other changes in that patch mean that its original behaviour (ignore
min/max values) cannot be restored.
Tests on a sample set of cilium programs show no change in count of
processed instructions.

Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Signed-off-by: Edward Cree
Acked-by: Alexei Starovoitov
Acked-by: Daniel Borkmann
Signed-off-by: David S. Miller
---
 include/linux/bpf_verifier.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'include/linux/bpf_verifier.h')

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 91d07efed2ba..d8f131a36fd0 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -125,7 +125,6 @@ struct bpf_verifier_env {
 	u32 id_gen;			/* used to generate unique reg IDs */
 	bool allow_ptr_leaks;
 	bool seen_direct_write;
-	bool varlen_map_value_access;
 	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
 };
 
-- cgit v1.2.3-59-g8ed1b


From 8e9cd9ce90d48369b2c5ddd79fe3d4a4cb1ccb56 Mon Sep 17 00:00:00 2001
From: Edward Cree
Date: Wed, 23 Aug 2017 15:11:21 +0100
Subject: bpf/verifier: document liveness analysis

The liveness tracking algorithm is quite subtle; add comments to
explain it.

Signed-off-by: Edward Cree
Acked-by: Alexei Starovoitov
Acked-by: Daniel Borkmann
Signed-off-by: David S. Miller
---
 include/linux/bpf_verifier.h | 13 +++++++++++++
 kernel/bpf/verifier.c        | 28 +++++++++++++++++++++++++++-
 2 files changed, 40 insertions(+), 1 deletion(-)

(limited to 'include/linux/bpf_verifier.h')

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index d8f131a36fd0..b8d200f60a40 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -21,6 +21,19 @@
  */
 #define BPF_MAX_VAR_SIZ	INT_MAX
 
+/* Liveness marks, used for registers and spilled-regs (in stack slots).
+ * Read marks propagate upwards until they find a write mark; they record that
+ * "one of this state's descendants read this reg" (and therefore the reg is
+ * relevant for states_equal() checks).
+ * Write marks collect downwards and do not propagate; they record that "the
+ * straight-line code that reached this state (from its parent) wrote this reg"
+ * (and therefore that reads propagated from this state or its descendants
+ * should not propagate to its parent).
+ * A state with a write mark can receive read marks; it just won't propagate
+ * them to its parent, since the write mark is a property, not of the state,
+ * but of the link between it and its parent.  See mark_reg_read() and
+ * mark_stack_slot_read() in kernel/bpf/verifier.c.
+ */
 enum bpf_reg_liveness {
 	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
 	REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 711bdbd22cea..d690c7dd1f1a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3417,6 +3417,12 @@ out_free:
 	return ret;
 }
 
+/* A write screens off any subsequent reads; but write marks come from the
+ * straight-line code between a state and its parent.  When we arrive at a
+ * jump target (in the first iteration of the propagate_liveness() loop),
+ * we didn't arrive by the straight-line code, so read marks in state must
+ * propagate to parent regardless of state's write marks.
+ */
 static bool do_propagate_liveness(const struct bpf_verifier_state *state,
 				  struct bpf_verifier_state *parent)
 {
@@ -3457,6 +3463,15 @@ static bool do_propagate_liveness(const struct bpf_verifier_state *state,
 	return touched;
 }
 
+/* "parent" is "a state from which we reach the current state", but initially
+ * it is not the state->parent (i.e. "the state whose straight-line code leads
+ * to the current state"), instead it is the state that happened to arrive at
+ * a (prunable) equivalent of the current state.  See comment above
+ * do_propagate_liveness() for consequences of this.
+ * This function is just a more efficient way of calling mark_reg_read() or
+ * mark_stack_slot_read() on each reg in "parent" that is read in "state",
+ * though it requires that parent != state->parent in the call arguments.
+ */
 static void propagate_liveness(const struct bpf_verifier_state *state,
 			       struct bpf_verifier_state *parent)
 {
@@ -3485,6 +3500,12 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 			/* reached equivalent register/stack state,
 			 * prune the search.
 			 * Registers read by the continuation are read by us.
+			 * If we have any write marks in env->cur_state, they
+			 * will prevent corresponding reads in the continuation
+			 * from reaching our parent (an explored_state).  Our
+			 * own state will get the read marks recorded, but
+			 * they'll be immediately forgotten as we're pruning
+			 * this state and will pop a new one.
 			 */
 			propagate_liveness(&sl->state, &env->cur_state);
 			return 1;
@@ -3508,7 +3529,12 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	env->explored_states[insn_idx] = new_sl;
 	/* connect new state to parentage chain */
 	env->cur_state.parent = &new_sl->state;
-	/* clear liveness marks in current state */
+	/* clear write marks in current state: the writes we did are not writes
+	 * our child did, so they don't screen off its reads from us.
+	 * (There are no read marks in current state, because reads always mark
+	 * their parent and current state never has children yet.  Only
+	 * explored_states can get read marks.)
+	 */
 	for (i = 0; i < BPF_REG_FP; i++)
 		env->cur_state.regs[i].live = REG_LIVE_NONE;
 	for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++)
-- cgit v1.2.3-59-g8ed1b
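
[Editor's aside, not part of the patches above]

The propagation rule that the new comments describe can be illustrated outside
the verifier.  Below is a minimal, self-contained sketch in plain C: the state
is reduced to a single register's liveness, and the toy_* names and the main()
driver are invented for illustration.  It is not the kernel's code, but the
loop follows the shape of mark_reg_read() in kernel/bpf/verifier.c as the
patch describes it: a read makes the register live in every ancestor state
until a write mark on the link to a parent screens the read off.

#include <stdio.h>

/* Simplified liveness marks, loosely mirroring enum bpf_reg_liveness. */
enum toy_liveness {
	TOY_LIVE_NONE	 = 0,	/* reg not read or written in this state */
	TOY_LIVE_READ	 = 1,	/* reg was read; parent's value matters */
	TOY_LIVE_WRITTEN = 2,	/* reg was written by the straight-line
				 * code leading to this state */
};

/* A verifier-like state reduced to one register; "parent" is the state
 * whose straight-line code leads to this one.
 */
struct toy_state {
	int live;
	struct toy_state *parent;
};

/* Walk the parentage chain: mark the reg as read in each ancestor until a
 * write mark on the link state->parent screens off the read.
 */
static void toy_mark_reg_read(struct toy_state *state)
{
	struct toy_state *parent = state->parent;

	while (parent) {
		/* the write on the link to the parent satisfies the read */
		if (state->live & TOY_LIVE_WRITTEN)
			break;
		parent->live |= TOY_LIVE_READ;
		state = parent;
		parent = state->parent;
	}
}

int main(void)
{
	struct toy_state grandparent = { TOY_LIVE_NONE, NULL };
	struct toy_state parent = { TOY_LIVE_NONE, &grandparent };
	struct toy_state child = { TOY_LIVE_NONE, &parent };

	/* no write marks anywhere: the read propagates all the way up */
	toy_mark_reg_read(&child);
	printf("plain read:  parent=%d grandparent=%d\n",
	       parent.live & TOY_LIVE_READ, grandparent.live & TOY_LIVE_READ);

	/* now pretend the child's straight-line code wrote the reg before
	 * reading it: the read is screened off and the ancestors stay NONE
	 */
	parent.live = grandparent.live = TOY_LIVE_NONE;
	child.live = TOY_LIVE_WRITTEN;
	toy_mark_reg_read(&child);
	printf("write+read:  parent=%d grandparent=%d\n",
	       parent.live & TOY_LIVE_READ, grandparent.live & TOY_LIVE_READ);

	return 0;
}

In the real verifier there is one such mark per register and per stack slot,
and propagate_liveness() performs the same walk in bulk when the current
state is pruned against an equivalent explored state, which is why the
write-mark clearing at the end of is_state_visited() matters.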