author		Stanislav Fomichev <sdf@google.com>	2022-04-14 09:12:33 -0700
committer	Alexei Starovoitov <ast@kernel.org>	2022-04-19 09:45:47 -0700
commit		055eb95533273bc334794dbc598400d10800528f (patch)
tree		7b694e74401831a39e0c5e3b6bd0c8416a77cc7f /include
parent		selftests/bpf: Refactor prog_tests logging and test execution (diff)
bpf: Move rcu lock management out of BPF_PROG_RUN routines
Commit 7d08c2c91171 ("bpf: Refactor BPF_PROG_RUN_ARRAY family of
macros into functions") switched a bunch of BPF_PROG_RUN macros to
inline routines. This changed the semantics a bit. Due to argument
expansion of macros, it used to be:

	rcu_read_lock();
	array = rcu_dereference(cgrp->bpf.effective[atype]);
	...

Now, with inline routines, we have:

	array_rcu = rcu_dereference(cgrp->bpf.effective[atype]);
	/* array_rcu can be kfree'd here */
	rcu_read_lock();
	array = rcu_dereference(array_rcu);

I'm assuming that in practice the RCU subsystem isn't fast enough to
trigger this, but let's use the RCU API properly. Also, rename to
lowercase to avoid confusion with the macros. Additionally, drop and
expand BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY.

See [1] for more context.

[1] https://lore.kernel.org/bpf/CAKH8qBs60fOinFdxiiQikK_q0EcVxGvNTQoWvHLEUGbgcj1UYg@mail.gmail.com/T/#u

v2:
- keep rcu locks inside by passing cgroup_bpf

Fixes: 7d08c2c91171 ("bpf: Refactor BPF_PROG_RUN_ARRAY family of macros into functions")
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/bpf/20220414161233.170780-1-sdf@google.com
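For illustration, a minimal sketch of the calling convention this patch
establishes: the caller now owns the RCU read-side critical section, so
the dereference and the array traversal happen under the same lock and
the array cannot be freed in between. The struct my_hook and
my_hook_run() names are hypothetical and not part of this patch;
bpf_prog_run_array() and bpf_prog_run() are the real routines from this
header.

	/* Hypothetical caller of the renamed bpf_prog_run_array(). */
	struct my_hook {
		struct bpf_prog_array __rcu *effective;
	};

	static u32 my_hook_run(struct my_hook *hook, const void *ctx)
	{
		u32 ret;

		rcu_read_lock();
		/* rcu_dereference() and the traversal share one read-side
		 * section; RCU_LOCKDEP_WARN() inside bpf_prog_run_array()
		 * will complain if the lock is not held.
		 */
		ret = bpf_prog_run_array(rcu_dereference(hook->effective),
					 ctx, bpf_prog_run);
		rcu_read_unlock();
		return ret;
	}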
Diffstat (limited to 'include')
-rw-r--r--	include/linux/bpf.h	115
1 file changed, 7 insertions(+), 108 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index bdb5298735ce..7bf441563ffc 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1221,7 +1221,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
/* an array of programs to be executed under rcu_lock.
*
* Typical usage:
- * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run);
+ * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run);
*
* the structure returned by bpf_prog_array_alloc() should be populated
* with program pointers and the last pointer must be NULL.
@@ -1315,83 +1315,22 @@ static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
-static __always_inline int
-BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
- const void *ctx, bpf_prog_run_fn run_prog,
- int retval, u32 *ret_flags)
-{
- const struct bpf_prog_array_item *item;
- const struct bpf_prog *prog;
- const struct bpf_prog_array *array;
- struct bpf_run_ctx *old_run_ctx;
- struct bpf_cg_run_ctx run_ctx;
- u32 func_ret;
-
- run_ctx.retval = retval;
- migrate_disable();
- rcu_read_lock();
- array = rcu_dereference(array_rcu);
- item = &array->items[0];
- old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
- while ((prog = READ_ONCE(item->prog))) {
- run_ctx.prog_item = item;
- func_ret = run_prog(prog, ctx);
- if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval))
- run_ctx.retval = -EPERM;
- *(ret_flags) |= (func_ret >> 1);
- item++;
- }
- bpf_reset_run_ctx(old_run_ctx);
- rcu_read_unlock();
- migrate_enable();
- return run_ctx.retval;
-}
-
-static __always_inline int
-BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
- const void *ctx, bpf_prog_run_fn run_prog,
- int retval)
-{
- const struct bpf_prog_array_item *item;
- const struct bpf_prog *prog;
- const struct bpf_prog_array *array;
- struct bpf_run_ctx *old_run_ctx;
- struct bpf_cg_run_ctx run_ctx;
-
- run_ctx.retval = retval;
- migrate_disable();
- rcu_read_lock();
- array = rcu_dereference(array_rcu);
- item = &array->items[0];
- old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
- while ((prog = READ_ONCE(item->prog))) {
- run_ctx.prog_item = item;
- if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval))
- run_ctx.retval = -EPERM;
- item++;
- }
- bpf_reset_run_ctx(old_run_ctx);
- rcu_read_unlock();
- migrate_enable();
- return run_ctx.retval;
-}
-
static __always_inline u32
-BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
+bpf_prog_run_array(const struct bpf_prog_array *array,
const void *ctx, bpf_prog_run_fn run_prog)
{
const struct bpf_prog_array_item *item;
const struct bpf_prog *prog;
- const struct bpf_prog_array *array;
struct bpf_run_ctx *old_run_ctx;
struct bpf_trace_run_ctx run_ctx;
u32 ret = 1;
- migrate_disable();
- rcu_read_lock();
- array = rcu_dereference(array_rcu);
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
+
if (unlikely(!array))
- goto out;
+ return ret;
+
+ migrate_disable();
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
item = &array->items[0];
while ((prog = READ_ONCE(item->prog))) {
@@ -1400,50 +1339,10 @@ BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
item++;
}
bpf_reset_run_ctx(old_run_ctx);
-out:
- rcu_read_unlock();
migrate_enable();
return ret;
}
-/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
- * so BPF programs can request cwr for TCP packets.
- *
- * Current cgroup skb programs can only return 0 or 1 (0 to drop the
- * packet. This macro changes the behavior so the low order bit
- * indicates whether the packet should be dropped (0) or not (1)
- * and the next bit is a congestion notification bit. This could be
- * used by TCP to call tcp_enter_cwr()
- *
- * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
- * 0: drop packet
- * 1: keep packet
- * 2: drop packet and cn
- * 3: keep packet and cn
- *
- * This macro then converts it to one of the NET_XMIT or an error
- * code that is then interpreted as drop packet (and no cn):
- * 0: NET_XMIT_SUCCESS skb should be transmitted
- * 1: NET_XMIT_DROP skb should be dropped and cn
- * 2: NET_XMIT_CN skb should be transmitted and cn
- * 3: -err skb should be dropped
- */
-#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \
- ({ \
- u32 _flags = 0; \
- bool _cn; \
- u32 _ret; \
- _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, 0, &_flags); \
- _cn = _flags & BPF_RET_SET_CN; \
- if (_ret && !IS_ERR_VALUE((long)_ret)) \
- _ret = -EFAULT; \
- if (!_ret) \
- _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
- else \
- _ret = (_cn ? NET_XMIT_DROP : _ret); \
- _ret; \
- })
-
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;
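
For reference, the return-value conversion performed by the dropped
BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY macro is, per the commit message
and the removed comment above, now open-coded at its only user,
__cgroup_bpf_run_filter_skb() (outside this 'include'-only diffstat).
A sketch of the equivalent logic follows; my_egress_convert() is an
illustrative name, and the body mirrors the removed macro:

	/* run_ret is the accumulated retval from the run-array (0 = allow,
	 * negative errno = deny); flags carries BPF_RET_SET_CN if any
	 * program requested congestion notification.
	 */
	static int my_egress_convert(int run_ret, u32 flags)
	{
		bool cn = flags & BPF_RET_SET_CN;

		/* unexpected positive (non-error) value collapses to -EFAULT */
		if (run_ret && !IS_ERR_VALUE((long)run_ret))
			run_ret = -EFAULT;

		if (!run_ret)	/* keep packet */
			return cn ? NET_XMIT_CN : NET_XMIT_SUCCESS;

		/* drop packet */
		return cn ? NET_XMIT_DROP : run_ret;
	}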