author     Björn Töpel <bjorn.topel@intel.com>   2019-12-13 18:51:09 +0100
committer  Alexei Starovoitov <ast@kernel.org>   2019-12-13 13:09:32 -0800
commit     7e6897f95935973c3253fd756135b5ea58043dc8 (patch)
tree       b51f503250727273df277698fac760b6cdc26393 /include/linux/filter.h
parent     bpf: Introduce BPF dispatcher (diff)
bpf, xdp: Start using the BPF dispatcher for XDP
This commit adds a BPF dispatcher for XDP. The dispatcher is updated from
the XDP control path, dev_xdp_install(), and used when an XDP program is
run via bpf_prog_run_xdp().

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20191213175112.30208-4-bjorn.topel@gmail.com
Diffstat (limited to 'include/linux/filter.h')
-rw-r--r--   include/linux/filter.h   40
1 file changed, 24 insertions(+), 16 deletions(-)
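Before the diff itself, a standalone C sketch may help illustrate the indirection the patch relies on: instead of the run site calling prog->bpf_func through a function pointer, __BPF_PROG_RUN(prog, ctx, dfunc) hands the context, the instruction array and the function pointer to a dispatcher function, and the XDP entry point passes the XDP dispatcher's trampoline. The names below (run_prog_via, nop_dispatch, toy_prog, struct insn) are illustrative stand-ins, not kernel definitions.

/* Standalone sketch (illustrative names only) of the dispatch indirection:
 * the default "nop" dispatcher just performs the old indirect call, while
 * a generated dispatcher trampoline can be patched into a direct call.
 */
#include <stdio.h>
#include <stddef.h>

struct insn { int dummy; };                     /* stand-in for struct bpf_insn */

typedef unsigned int (*prog_func_t)(const void *ctx, const struct insn *insnsi);

/* Fallback dispatcher: behaves like the pre-patch indirect call. */
static unsigned int nop_dispatch(const void *ctx, const struct insn *insnsi,
                                 prog_func_t bpf_func)
{
        return bpf_func(ctx, insnsi);
}

/* Toy stand-in for a JITed program's prog->bpf_func. */
static unsigned int toy_prog(const void *ctx, const struct insn *insnsi)
{
        (void)ctx; (void)insnsi;
        return 2;                               /* e.g. XDP_PASS */
}

/* Models __BPF_PROG_RUN(prog, ctx, dfunc) minus the stats bookkeeping. */
#define RUN_PROG_VIA(bpf_func, ctx, dfunc) (dfunc)(ctx, NULL, bpf_func)

int main(void)
{
        /* The generic entry point keeps using the nop dispatcher ... */
        unsigned int ret = RUN_PROG_VIA(toy_prog, NULL, nop_dispatch);

        /* ... whereas bpf_prog_run_xdp() passes the XDP dispatcher's
         * trampoline, which the control path re-patches per program.
         */
        printf("ret = %u\n", ret);
        return 0;
}

With that picture in mind, the diff below splits BPF_PROG_RUN() into __BPF_PROG_RUN() plus a dispatcher argument and wires the XDP run path to the bpf_dispatcher_xdp trampoline.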
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a141cb07e76a..37ac7025031d 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -559,23 +559,26 @@ struct sk_filter {
DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
-#define BPF_PROG_RUN(prog, ctx) ({ \
-        u32 ret; \
-        cant_sleep(); \
-        if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
-                struct bpf_prog_stats *stats; \
-                u64 start = sched_clock(); \
-                ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
-                stats = this_cpu_ptr(prog->aux->stats); \
-                u64_stats_update_begin(&stats->syncp); \
-                stats->cnt++; \
-                stats->nsecs += sched_clock() - start; \
-                u64_stats_update_end(&stats->syncp); \
-        } else { \
-                ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
-        } \
+#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \
+        u32 ret; \
+        cant_sleep(); \
+        if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
+                struct bpf_prog_stats *stats; \
+                u64 start = sched_clock(); \
+                ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
+                stats = this_cpu_ptr(prog->aux->stats); \
+                u64_stats_update_begin(&stats->syncp); \
+                stats->cnt++; \
+                stats->nsecs += sched_clock() - start; \
+                u64_stats_update_end(&stats->syncp); \
+        } else { \
+                ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
+        } \
        ret; })
+#define BPF_PROG_RUN(prog, ctx) __BPF_PROG_RUN(prog, ctx, \
+                                               bpf_dispatcher_nopfunc)
+
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
struct bpf_skb_data_end {
@@ -699,6 +702,8 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
        return res;
}
+DECLARE_BPF_DISPATCHER(bpf_dispatcher_xdp)
+
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
                                            struct xdp_buff *xdp)
{
@@ -708,9 +713,12 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
         * already takes rcu_read_lock() when fetching the program, so
         * it's not necessary here anymore.
         */
-        return BPF_PROG_RUN(prog, xdp);
+        return __BPF_PROG_RUN(prog, xdp,
+                              BPF_DISPATCHER_FUNC(bpf_dispatcher_xdp));
}
+void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
+
static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
        return prog->len * sizeof(struct bpf_insn);
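
The newly exported bpf_prog_change_xdp() is the control-path half of the mechanism. The sketch below is hypothetical and simplified (example_install_xdp and example_driver_attach are made-up placeholders; the real caller is dev_xdp_install() in net/core/dev.c, which is outside this file's diff): it only shows where bpf_prog_change_xdp() fits, namely after the driver has accepted the new program, so the dispatcher trampoline used by bpf_prog_run_xdp() is re-patched from the previous program to the new one.

/* Hypothetical, simplified control-path sketch; the real update is done
 * by dev_xdp_install() in net/core/dev.c (not part of this diff).
 */

/* Placeholder for handing the program to the driver's ndo_bpf hook. */
static int example_driver_attach(struct net_device *dev, struct bpf_prog *prog)
{
        return 0;
}

static int example_install_xdp(struct net_device *dev,
                               struct bpf_prog *prev_prog,
                               struct bpf_prog *prog)
{
        int err;

        err = example_driver_attach(dev, prog);
        if (err)
                return err;

        /* Tell the XDP dispatcher about the old -> new transition so that
         * bpf_prog_run_xdp()'s dispatcher trampoline targets the program
         * that is now installed.
         */
        bpf_prog_change_xdp(prev_prog, prog);
        return 0;
}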