path: root/include/linux/filter.h
Diffstat (limited to 'include/linux/filter.h')
-rw-r--r--	include/linux/filter.h	|  65 ++++++++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 40 insertions(+), 25 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a141cb07e76a..f349e2c0884c 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -420,7 +420,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
#define BPF_FIELD_SIZEOF(type, field) \
({ \
- const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
+ const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \
BUILD_BUG_ON(__size < 0); \
__size; \
})
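
For reference, sizeof_field() is the generic helper from include/linux/stddef.h that replaces the older FIELD_SIZEOF(); both evaluate to the same compile-time constant, so the rename throughout this file is purely mechanical. A minimal sketch of its semantics, assuming the stddef.h definition at the time of this change:

	/* Measures a struct member's size without needing an instance. */
	#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
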
@@ -497,7 +497,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \
({ \
- BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE)); \
+ BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE)); \
*(PTR_SIZE) = (SIZE); \
offsetof(TYPE, MEMBER); \
})
@@ -559,23 +559,26 @@ struct sk_filter {
DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
-#define BPF_PROG_RUN(prog, ctx) ({ \
- u32 ret; \
- cant_sleep(); \
- if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
- struct bpf_prog_stats *stats; \
- u64 start = sched_clock(); \
- ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
- stats = this_cpu_ptr(prog->aux->stats); \
- u64_stats_update_begin(&stats->syncp); \
- stats->cnt++; \
- stats->nsecs += sched_clock() - start; \
- u64_stats_update_end(&stats->syncp); \
- } else { \
- ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
- } \
+#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \
+ u32 ret; \
+ cant_sleep(); \
+ if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
+ struct bpf_prog_stats *stats; \
+ u64 start = sched_clock(); \
+ ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
+ stats = this_cpu_ptr(prog->aux->stats); \
+ u64_stats_update_begin(&stats->syncp); \
+ stats->cnt++; \
+ stats->nsecs += sched_clock() - start; \
+ u64_stats_update_end(&stats->syncp); \
+ } else { \
+ ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
+ } \
ret; })
+#define BPF_PROG_RUN(prog, ctx) __BPF_PROG_RUN(prog, ctx, \
+ bpf_dispatcher_nopfunc)
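
Splitting the runner into __BPF_PROG_RUN() lets the caller choose how the program is entered: the default BPF_PROG_RUN() goes through a no-op dispatcher that performs the usual indirect call, while the XDP path further down can route through a real dispatcher trampoline via BPF_DISPATCHER_FUNC(). An illustrative sketch of what the no-op dispatcher amounts to, assuming the dfunc signature used above (the real helper lives alongside the dispatcher macros, e.g. in include/linux/bpf.h):

	/* Illustrative only: with no dispatcher installed, running the
	 * program is just a call through prog->bpf_func.
	 */
	static __always_inline unsigned int bpf_dispatcher_nopfunc(
		const void *ctx,
		const struct bpf_insn *insnsi,
		unsigned int (*bpf_func)(const void *,
					 const struct bpf_insn *))
	{
		return bpf_func(ctx, insnsi);
	}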
+
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
struct bpf_skb_data_end {
@@ -589,7 +592,6 @@ struct bpf_redirect_info {
u32 tgt_index;
void *tgt_value;
struct bpf_map *map;
- struct bpf_map *map_to_flush;
u32 kern_flags;
};
@@ -608,7 +610,7 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
{
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
- BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
+ BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
cb->data_meta = skb->data - skb_metadata_len(skb);
cb->data_end = skb->data + skb_headlen(skb);
}
@@ -646,9 +648,9 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
* attached to sockets, we need to clear the bpf_skb_cb() area
* to not leak previous contents to user space.
*/
- BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
- BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
- FIELD_SIZEOF(struct qdisc_skb_cb, data));
+ BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
+ BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) !=
+ sizeof_field(struct qdisc_skb_cb, data));
return qdisc_skb_cb(skb)->data;
}
@@ -699,6 +701,8 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
return res;
}
+DECLARE_BPF_DISPATCHER(bpf_dispatcher_xdp)
+
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
struct xdp_buff *xdp)
{
@@ -708,9 +712,12 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
* already takes rcu_read_lock() when fetching the program, so
* it's not necessary here anymore.
*/
- return BPF_PROG_RUN(prog, xdp);
+ return __BPF_PROG_RUN(prog, xdp,
+ BPF_DISPATCHER_FUNC(bpf_dispatcher_xdp));
}
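
The comment above relies on a specific caller pattern: the XDP invoker holds the RCU read side around both the program lookup and the run, so bpf_prog_run_xdp() does not take the lock itself. A hedged sketch of that pattern in a driver receive path; struct example_rxq and example_run_xdp() are hypothetical names used only for illustration:

	/* Hypothetical per-ring structure, for illustration only. */
	struct example_rxq {
		struct bpf_prog __rcu *xdp_prog;
	};

	static u32 example_run_xdp(struct example_rxq *rxq, struct xdp_buff *xdp)
	{
		struct bpf_prog *prog;
		u32 act = XDP_PASS;

		/* One RCU read-side section covers both fetching the program
		 * and running it, matching the contract described above.
		 */
		rcu_read_lock();
		prog = rcu_dereference(rxq->xdp_prog);
		if (prog)
			act = bpf_prog_run_xdp(prog, xdp);
		rcu_read_unlock();

		return act;
	}
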
+void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
+
static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
return prog->len * sizeof(struct bpf_insn);
@@ -836,6 +843,8 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);
+const struct bpf_func_proto *
+bpf_base_func_proto(enum bpf_func_id func_id);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
@@ -909,7 +918,7 @@ static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
return 0;
}
-/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
+/* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the
* same cpu context. Further for best results no more than a single map
* for the do_redirect/do_flush pair should be used. This limitation is
* because we only track one map and force a flush when the map changes.
@@ -920,7 +929,13 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
struct bpf_prog *prog);
-void xdp_do_flush_map(void);
+void xdp_do_flush(void);
+
+/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
+ * it is no longer only flushing maps. Keep this define for compatibility
+ * until all drivers are updated - do not use xdp_do_flush_map() in new code!
+ */
+#define xdp_do_flush_map xdp_do_flush
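
A hedged illustration of how the rename looks from a driver's NAPI poll tail; example_poll() and example_clean_rx() are hypothetical. Frames queued by xdp_do_redirect() during RX processing are flushed from the same CPU before the poll routine returns; unconverted drivers keep compiling through the define above, but new code should call xdp_do_flush() directly:

	/* Hypothetical NAPI poll routine, for illustration only. */
	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = example_clean_rx(napi, budget); /* may call xdp_do_redirect() */

		if (work_done < budget)
			napi_complete_done(napi, work_done);

		xdp_do_flush();		/* previously xdp_do_flush_map() */
		return work_done;
	}
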
void bpf_warn_invalid_xdp_action(u32 act);