path: root/include
author	David S. Miller <davem@davemloft.net>	2020-01-23 08:10:16 +0100
committer	David S. Miller <davem@davemloft.net>	2020-01-23 08:10:16 +0100
commit	954b3c4397792c8614aa4aaf25030ae87ece8307 (patch)
tree	b2648c3d97fe2332863d7baac069c23cab0addc4 /include
parent	net: convert additional drivers to use phy_do_ioctl (diff)
parent	Merge branch 'bpf_cubic' (diff)
download	linux-dev-954b3c4397792c8614aa4aaf25030ae87ece8307.tar.xz
	linux-dev-954b3c4397792c8614aa4aaf25030ae87ece8307.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-01-22

The following pull-request contains BPF updates for your *net-next* tree.

We've added 92 non-merge commits during the last 16 day(s) which contain
a total of 320 files changed, 7532 insertions(+), 1448 deletions(-).

The main changes are:

1) Function-by-function verification and program extensions from Alexei.

2) Massive cleanup of selftests/bpf from Toke and Andrii.

3) Batched bpf map operations from Brian and Yonghong.

4) TCP congestion control in bpf from Martin.

5) Bulking for non-map xdp_redirect from Toke.

6) bpf_send_signal_thread helper from Yonghong.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
-rw-r--r--	include/linux/bpf.h	128
-rw-r--r--	include/linux/bpf_types.h	9
-rw-r--r--	include/linux/bpf_verifier.h	10
-rw-r--r--	include/linux/btf.h	52
-rw-r--r--	include/linux/filter.h	12
-rw-r--r--	include/linux/netdevice.h	13
-rw-r--r--	include/net/sock.h	2
-rw-r--r--	include/net/tcp.h	2
-rw-r--r--	include/trace/events/xdp.h	130
-rw-r--r--	include/uapi/linux/bpf.h	72
-rw-r--r--	include/uapi/linux/btf.h	6
11 files changed, 343 insertions(+), 93 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b14e51d56a82..a9687861fd7e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -17,6 +17,7 @@
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
+#include <linux/module.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -43,6 +44,15 @@ struct bpf_map_ops {
int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
void (*map_release_uref)(struct bpf_map *map);
void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
+ int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+ int (*map_lookup_and_delete_batch)(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+ int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+ int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
/* funcs callable from userspace and from eBPF programs */
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
@@ -106,6 +116,7 @@ struct bpf_map {
struct btf *btf;
struct bpf_map_memory memory;
char name[BPF_OBJ_NAME_LEN];
+ u32 btf_vmlinux_value_type_id;
bool unpriv_array;
bool frozen; /* write-once; write-protected by freeze_mutex */
/* 22 bytes hole */
@@ -183,7 +194,8 @@ static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
- return map->btf && map->ops->map_seq_show_elem;
+ return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
+ map->ops->map_seq_show_elem;
}
int map_check_no_btf(const struct bpf_map *map,
@@ -349,6 +361,10 @@ struct bpf_verifier_ops {
const struct bpf_insn *src,
struct bpf_insn *dst,
struct bpf_prog *prog, u32 *target_size);
+ int (*btf_struct_access)(struct bpf_verifier_log *log,
+ const struct btf_type *t, int off, int size,
+ enum bpf_access_type atype,
+ u32 *next_btf_id);
};
struct bpf_prog_offload_ops {
@@ -437,7 +453,8 @@ struct btf_func_model {
* fentry = a set of program to run before calling original function
* fexit = a set of program to run after original function
*/
-int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
+int arch_prepare_bpf_trampoline(void *image, void *image_end,
+ const struct btf_func_model *m, u32 flags,
struct bpf_prog **fentry_progs, int fentry_cnt,
struct bpf_prog **fexit_progs, int fexit_cnt,
void *orig_call);
@@ -448,7 +465,8 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
enum bpf_tramp_prog_type {
BPF_TRAMP_FENTRY,
BPF_TRAMP_FEXIT,
- BPF_TRAMP_MAX
+ BPF_TRAMP_MAX,
+ BPF_TRAMP_REPLACE, /* more than MAX */
};
struct bpf_trampoline {
@@ -463,6 +481,11 @@ struct bpf_trampoline {
void *addr;
bool ftrace_managed;
} func;
+ /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
+ * program by replacing one of its functions. func.addr is the address
+ * of the function it replaced.
+ */
+ struct bpf_prog *extension_prog;
/* list of BPF programs using this trampoline */
struct hlist_head progs_hlist[BPF_TRAMP_MAX];
/* Number of attached programs. A counter per kind. */
@@ -558,6 +581,7 @@ static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
#endif
struct bpf_func_info_aux {
+ u16 linkage;
bool unreliable;
};
@@ -668,6 +692,73 @@ struct bpf_array_aux {
struct work_struct work;
};
+struct bpf_struct_ops_value;
+struct btf_type;
+struct btf_member;
+
+#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
+struct bpf_struct_ops {
+ const struct bpf_verifier_ops *verifier_ops;
+ int (*init)(struct btf *btf);
+ int (*check_member)(const struct btf_type *t,
+ const struct btf_member *member);
+ int (*init_member)(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata);
+ int (*reg)(void *kdata);
+ void (*unreg)(void *kdata);
+ const struct btf_type *type;
+ const struct btf_type *value_type;
+ const char *name;
+ struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
+ u32 type_id;
+ u32 value_id;
+};
+
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
+#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
+const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
+void bpf_struct_ops_init(struct btf *btf);
+bool bpf_struct_ops_get(const void *kdata);
+void bpf_struct_ops_put(const void *kdata);
+int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
+ void *value);
+static inline bool bpf_try_module_get(const void *data, struct module *owner)
+{
+ if (owner == BPF_MODULE_OWNER)
+ return bpf_struct_ops_get(data);
+ else
+ return try_module_get(owner);
+}
+static inline void bpf_module_put(const void *data, struct module *owner)
+{
+ if (owner == BPF_MODULE_OWNER)
+ bpf_struct_ops_put(data);
+ else
+ module_put(owner);
+}
+#else
+static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
+{
+ return NULL;
+}
+static inline void bpf_struct_ops_init(struct btf *btf) { }
+static inline bool bpf_try_module_get(const void *data, struct module *owner)
+{
+ return try_module_get(owner);
+}
+static inline void bpf_module_put(const void *data, struct module *owner)
+{
+ module_put(owner);
+}
+static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
+ void *key,
+ void *value)
+{
+ return -EINVAL;
+}
+#endif
+
struct bpf_array {
struct bpf_map map;
u32 elem_size;
@@ -906,6 +997,15 @@ void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
+int generic_map_lookup_batch(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int generic_map_update_batch(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int generic_map_delete_batch(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
extern int sysctl_unprivileged_bpf_disabled;
@@ -962,7 +1062,9 @@ struct sk_buff;
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
-void __dev_map_flush(void);
+void __dev_flush(void);
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+ struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
@@ -1006,7 +1108,13 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
const char *func_name,
struct btf_func_model *m);
-int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog);
+struct bpf_reg_state;
+int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
+ struct bpf_reg_state *regs);
+int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
+ struct bpf_reg_state *reg);
+int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
+ struct btf *btf, const struct btf_type *t);
struct bpf_prog *bpf_prog_by_id(u32 id);
@@ -1071,7 +1179,7 @@ static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map
return NULL;
}
-static inline void __dev_map_flush(void)
+static inline void __dev_flush(void)
{
}
@@ -1079,6 +1187,13 @@ struct xdp_buff;
struct bpf_dtab_netdev;
static inline
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+ struct net_device *dev_rx)
+{
+ return 0;
+}
+
+static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx)
{
@@ -1299,6 +1414,7 @@ extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
+extern const struct bpf_func_proto bpf_jiffies64_proto;
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
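[Editor's note] The bpf_try_module_get()/bpf_module_put() pair added above lets callers treat BPF struct_ops-backed objects and ordinary module-owned objects uniformly: when owner is the BPF_MODULE_OWNER poison value, the reference is routed to bpf_struct_ops_get()/bpf_struct_ops_put() instead of try_module_get()/module_put(). A minimal sketch of the call pattern this enables, using the tcp_congestion_ops case this series targets (the exact call sites are an assumption, not taken from this diff):

	/* Hedged sketch: take/drop a reference on ops that may come either
	 * from a kernel module or from a BPF struct_ops map.
	 */
	static bool example_try_get_ca(struct tcp_congestion_ops *ca)
	{
		/* ca->owner is BPF_MODULE_OWNER for struct_ops-backed
		 * algorithms, a real module pointer otherwise.
		 */
		return bpf_try_module_get(ca, ca->owner);
	}

	static void example_put_ca(struct tcp_congestion_ops *ca)
	{
		bpf_module_put(ca, ca->owner);
	}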
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 93740b3614d7..c81d4ece79a4 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -65,6 +65,12 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2,
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport,
struct sk_reuseport_md, struct sk_reuseport_kern)
#endif
+#if defined(CONFIG_BPF_JIT)
+BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops,
+ void *, void *)
+BPF_PROG_TYPE(BPF_PROG_TYPE_EXT, bpf_extension,
+ void *, void *)
+#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
@@ -105,3 +111,6 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
+#if defined(CONFIG_BPF_JIT)
+BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
+#endif
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 26e40de9ef55..5406e6e96585 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -304,11 +304,13 @@ struct bpf_insn_aux_data {
u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
int sanitize_stack_off; /* stack slot to be cleared */
- bool seen; /* this insn was processed by the verifier */
+ u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
bool zext_dst; /* this insn zero extends dst reg */
u8 alu_state; /* used in combination with alu_limit */
- bool prune_point;
+
+ /* below fields are initialized once */
unsigned int orig_idx; /* original instruction index */
+ bool prune_point;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -379,6 +381,7 @@ struct bpf_verifier_env {
int *insn_stack;
int cur_stack;
} cfg;
+ u32 pass_cnt; /* number of times do_check() was called */
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
u32 prev_insn_processed, insn_processed;
@@ -428,4 +431,7 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
+int check_ctx_reg(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg, int regno);
+
#endif /* _LINUX_BPF_VERIFIER_H */
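[Editor's note] Widening 'seen' from bool to u32 lets the verifier record *which* do_check() pass last visited an instruction, so dead-code analysis can distinguish the current pass from stale marks left by earlier per-function passes. A hedged sketch of the intended usage (the real verifier call sites are an assumption):

	/* mark: this insn was visited in the current verification pass */
	env->insn_aux_data[insn_idx].seen = env->pass_cnt;

	/* later: live in this pass, or a leftover from a previous one? */
	bool live = env->insn_aux_data[insn_idx].seen == env->pass_cnt;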
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 79d4abc2556a..5c1ea99b480f 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -7,6 +7,8 @@
#include <linux/types.h>
#include <uapi/linux/btf.h>
+#define BTF_TYPE_EMIT(type) ((void)(type *)0)
+
struct btf;
struct btf_member;
struct btf_type;
@@ -53,6 +55,22 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
u32 expected_offset, u32 expected_size);
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
bool btf_type_is_void(const struct btf_type *t);
+s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
+const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
+ u32 id, u32 *res_id);
+const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
+ u32 id, u32 *res_id);
+const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
+ u32 id, u32 *res_id);
+const struct btf_type *
+btf_resolve_size(const struct btf *btf, const struct btf_type *type,
+ u32 *type_size, const struct btf_type **elem_type,
+ u32 *total_nelems);
+
+#define for_each_member(i, struct_type, member) \
+ for (i = 0, member = btf_type_member(struct_type); \
+ i < btf_type_vlen(struct_type); \
+ i++, member++)
static inline bool btf_type_is_ptr(const struct btf_type *t)
{
@@ -84,6 +102,40 @@ static inline bool btf_type_is_func_proto(const struct btf_type *t)
return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
}
+static inline u16 btf_type_vlen(const struct btf_type *t)
+{
+ return BTF_INFO_VLEN(t->info);
+}
+
+static inline u16 btf_func_linkage(const struct btf_type *t)
+{
+ return BTF_INFO_VLEN(t->info);
+}
+
+static inline bool btf_type_kflag(const struct btf_type *t)
+{
+ return BTF_INFO_KFLAG(t->info);
+}
+
+static inline u32 btf_member_bit_offset(const struct btf_type *struct_type,
+ const struct btf_member *member)
+{
+ return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
+ : member->offset;
+}
+
+static inline u32 btf_member_bitfield_size(const struct btf_type *struct_type,
+ const struct btf_member *member)
+{
+ return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
+ : 0;
+}
+
+static inline const struct btf_member *btf_type_member(const struct btf_type *t)
+{
+ return (const struct btf_member *)(t + 1);
+}
+
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
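[Editor's note] The for_each_member() iterator and the bit-offset accessors added above are what struct_ops code uses to walk a BTF struct definition. A hedged sketch of iterating the members of a resolved struct type, handling the kind_flag bitfield encoding (the function name is illustrative):

	static void example_walk_struct(const struct btf_type *t)
	{
		const struct btf_member *member;
		u32 i;

		for_each_member(i, t, member) {
			u32 off = btf_member_bit_offset(t, member);
			u32 bsize = btf_member_bitfield_size(t, member);

			/* bsize == 0 means a regular (non-bitfield) member */
			pr_info("member %u: bit_offset=%u bitfield_size=%u\n",
				i, off, bsize);
		}
	}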
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 70e6dd960bca..f349e2c0884c 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -843,6 +843,8 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);
+const struct bpf_func_proto *
+bpf_base_func_proto(enum bpf_func_id func_id);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
@@ -916,7 +918,7 @@ static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
return 0;
}
-/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
+/* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the
* same cpu context. Further for best results no more than a single map
* for the do_redirect/do_flush pair should be used. This limitation is
* because we only track one map and force a flush when the map changes.
@@ -927,7 +929,13 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
struct bpf_prog *prog);
-void xdp_do_flush_map(void);
+void xdp_do_flush(void);
+
+/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
+ * it is no longer only flushing maps. Keep this define for compatibility
+ * until all drivers are updated - do not use xdp_do_flush_map() in new code!
+ */
+#define xdp_do_flush_map xdp_do_flush
void bpf_warn_invalid_xdp_action(u32 act);
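[Editor's note] The compatibility define above keeps old drivers building while new code is expected to call xdp_do_flush() directly. A hedged sketch of the usual call pattern at the end of a driver's NAPI poll (the driver structure is illustrative, not from this diff):

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		int work_done = 0;
		bool xdp_redirected = false;

		/* ... per-packet processing; xdp_do_redirect() may be
		 * called for each frame, setting xdp_redirected ...
		 */

		/* flush pending redirects once per poll, on this CPU */
		if (xdp_redirected)
			xdp_do_flush();	/* was xdp_do_flush_map() */

		return work_done;
	}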
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2741aa35bec6..5ec3537fbdb1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -876,6 +876,7 @@ enum bpf_netdev_command {
struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
+struct xdp_dev_bulk_queue;
struct netdev_bpf {
enum bpf_netdev_command command;
@@ -1986,12 +1987,10 @@ struct net_device {
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
struct Qdisc *qdisc;
-#ifdef CONFIG_NET_SCHED
- DECLARE_HASHTABLE (qdisc_hash, 4);
-#endif
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
- int watchdog_timeo;
+
+ struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
#ifdef CONFIG_XPS
struct xps_dev_maps __rcu *xps_cpus_map;
@@ -2001,11 +2000,15 @@ struct net_device {
struct mini_Qdisc __rcu *miniq_egress;
#endif
+#ifdef CONFIG_NET_SCHED
+ DECLARE_HASHTABLE (qdisc_hash, 4);
+#endif
/* These may be needed for future network-power-down code. */
struct timer_list watchdog_timer;
+ int watchdog_timeo;
- int __percpu *pcpu_refcnt;
struct list_head todo_list;
+ int __percpu *pcpu_refcnt;
struct list_head link_watch_list;
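[Editor's note] The new per-netdev percpu xdp_bulkq pointer replaces per-map bulk queues and is what enables bulking for non-map xdp_redirect. A hedged sketch of how the redirect path might pick up the current CPU's queue (struct xdp_dev_bulk_queue is only forward-declared here, so this is illustrative only):

	static struct xdp_dev_bulk_queue *
	example_pick_queue(struct net_device *dev)
	{
		/* one queue per (device, CPU) pair: the hot path can
		 * append frames lock-free as long as it stays on-CPU,
		 * flushing via __dev_flush() when full or at poll end.
		 */
		return this_cpu_ptr(dev->xdp_bulkq);
	}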
diff --git a/include/net/sock.h b/include/net/sock.h
index 432ff73d20f3..02162b0378f7 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2597,4 +2597,6 @@ static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
return false;
}
+void sock_def_readable(struct sock *sk);
+
#endif /* _SOCK_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 2869d28ed5d8..a5ea27df3c2b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1019,6 +1019,7 @@ enum tcp_ca_ack_event_flags {
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN 0x2
+#define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
union tcp_cc_info;
@@ -1113,6 +1114,7 @@ u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
+struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
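[Editor's note] TCP_CONG_MASK collects the currently valid tcp_congestion_ops flag bits, which lets the new BPF struct_ops path reject unknown flags, and tcp_ca_find() is exported so that path can look up an algorithm by name. A hedged sketch of the kind of validation this enables (the function is illustrative):

	static int example_validate_ca(const struct tcp_congestion_ops *ca)
	{
		/* reject flag bits outside the known set */
		if (ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		return 0;
	}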
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index a7378bcd9928..b95d65e8c628 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -79,14 +79,26 @@ TRACE_EVENT(xdp_bulk_tx,
__entry->sent, __entry->drops, __entry->err)
);
+#ifndef __DEVMAP_OBJ_TYPE
+#define __DEVMAP_OBJ_TYPE
+struct _bpf_dtab_netdev {
+ struct net_device *dev;
+};
+#endif /* __DEVMAP_OBJ_TYPE */
+
+#define devmap_ifindex(tgt, map) \
+ (((map->map_type == BPF_MAP_TYPE_DEVMAP || \
+ map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)) ? \
+ ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex : 0)
+
DECLARE_EVENT_CLASS(xdp_redirect_template,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
- int to_ifindex, int err,
- const struct bpf_map *map, u32 map_index),
+ const void *tgt, int err,
+ const struct bpf_map *map, u32 index),
- TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
+ TP_ARGS(dev, xdp, tgt, err, map, index),
TP_STRUCT__entry(
__field(int, prog_id)
@@ -103,90 +115,65 @@ DECLARE_EVENT_CLASS(xdp_redirect_template,
__entry->act = XDP_REDIRECT;
__entry->ifindex = dev->ifindex;
__entry->err = err;
- __entry->to_ifindex = to_ifindex;
+ __entry->to_ifindex = map ? devmap_ifindex(tgt, map) :
+ index;
__entry->map_id = map ? map->id : 0;
- __entry->map_index = map_index;
+ __entry->map_index = map ? index : 0;
),
- TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d",
+ TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
+ " map_id=%d map_index=%d",
__entry->prog_id,
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
__entry->ifindex, __entry->to_ifindex,
- __entry->err)
+ __entry->err, __entry->map_id, __entry->map_index)
);
DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
- int to_ifindex, int err,
- const struct bpf_map *map, u32 map_index),
- TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
+ const void *tgt, int err,
+ const struct bpf_map *map, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map, index)
);
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
- int to_ifindex, int err,
- const struct bpf_map *map, u32 map_index),
- TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
+ const void *tgt, int err,
+ const struct bpf_map *map, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map, index)
);
#define _trace_xdp_redirect(dev, xdp, to) \
- trace_xdp_redirect(dev, xdp, to, 0, NULL, 0);
+ trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to);
#define _trace_xdp_redirect_err(dev, xdp, to, err) \
- trace_xdp_redirect_err(dev, xdp, to, err, NULL, 0);
+ trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to);
-DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map,
+#define _trace_xdp_redirect_map(dev, xdp, to, map, index) \
+ trace_xdp_redirect(dev, xdp, to, 0, map, index);
+
+#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err) \
+ trace_xdp_redirect_err(dev, xdp, to, err, map, index);
+
+/* not used anymore, but kept around so as not to break old programs */
+DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
- int to_ifindex, int err,
- const struct bpf_map *map, u32 map_index),
- TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
- TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
- " map_id=%d map_index=%d",
- __entry->prog_id,
- __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
- __entry->ifindex, __entry->to_ifindex,
- __entry->err,
- __entry->map_id, __entry->map_index)
+ const void *tgt, int err,
+ const struct bpf_map *map, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map, index)
);
-DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map_err,
+DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
- int to_ifindex, int err,
- const struct bpf_map *map, u32 map_index),
- TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
- TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
- " map_id=%d map_index=%d",
- __entry->prog_id,
- __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
- __entry->ifindex, __entry->to_ifindex,
- __entry->err,
- __entry->map_id, __entry->map_index)
+ const void *tgt, int err,
+ const struct bpf_map *map, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map, index)
);
-#ifndef __DEVMAP_OBJ_TYPE
-#define __DEVMAP_OBJ_TYPE
-struct _bpf_dtab_netdev {
- struct net_device *dev;
-};
-#endif /* __DEVMAP_OBJ_TYPE */
-
-#define devmap_ifindex(fwd, map) \
- ((map->map_type == BPF_MAP_TYPE_DEVMAP || \
- map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) ? \
- ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0)
-
-#define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \
- trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map), \
- 0, map, idx)
-
-#define _trace_xdp_redirect_map_err(dev, xdp, fwd, map, idx, err) \
- trace_xdp_redirect_map_err(dev, xdp, devmap_ifindex(fwd, map), \
- err, map, idx)
-
TRACE_EVENT(xdp_cpumap_kthread,
TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
@@ -259,43 +246,38 @@ TRACE_EVENT(xdp_cpumap_enqueue,
TRACE_EVENT(xdp_devmap_xmit,
- TP_PROTO(const struct bpf_map *map, u32 map_index,
- int sent, int drops,
- const struct net_device *from_dev,
- const struct net_device *to_dev, int err),
+ TP_PROTO(const struct net_device *from_dev,
+ const struct net_device *to_dev,
+ int sent, int drops, int err),
- TP_ARGS(map, map_index, sent, drops, from_dev, to_dev, err),
+ TP_ARGS(from_dev, to_dev, sent, drops, err),
TP_STRUCT__entry(
- __field(int, map_id)
+ __field(int, from_ifindex)
__field(u32, act)
- __field(u32, map_index)
+ __field(int, to_ifindex)
__field(int, drops)
__field(int, sent)
- __field(int, from_ifindex)
- __field(int, to_ifindex)
__field(int, err)
),
TP_fast_assign(
- __entry->map_id = map->id;
+ __entry->from_ifindex = from_dev->ifindex;
__entry->act = XDP_REDIRECT;
- __entry->map_index = map_index;
+ __entry->to_ifindex = to_dev->ifindex;
__entry->drops = drops;
__entry->sent = sent;
- __entry->from_ifindex = from_dev->ifindex;
- __entry->to_ifindex = to_dev->ifindex;
__entry->err = err;
),
TP_printk("ndo_xdp_xmit"
- " map_id=%d map_index=%d action=%s"
+ " from_ifindex=%d to_ifindex=%d action=%s"
" sent=%d drops=%d"
- " from_ifindex=%d to_ifindex=%d err=%d",
- __entry->map_id, __entry->map_index,
+ " err=%d",
+ __entry->from_ifindex, __entry->to_ifindex,
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
__entry->sent, __entry->drops,
- __entry->from_ifindex, __entry->to_ifindex, __entry->err)
+ __entry->err)
);
/* Expect users already include <net/xdp.h>, but not xdp_priv.h */
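[Editor's note] The redirect tracepoints now take the map target as an opaque 'tgt' pointer and fold map_id/map_index into the common printout; xdp_redirect_map and xdp_redirect_map_err become plain aliases of the event class, kept "so as not to break old programs". A hedged sketch of a BPF consumer reading the new argument order through a raw tracepoint (args[] mirrors TP_PROTO; SEC() is the libbpf convention, and the program body is an assumption):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* args[] follows TP_PROTO(dev, xdp, tgt, err, map, index) */
	SEC("raw_tracepoint/xdp_redirect")
	int trace_xdp_redirect(struct bpf_raw_tracepoint_args *ctx)
	{
		long err = (long)ctx->args[3];
		char fmt[] = "xdp_redirect err %ld\n";

		if (err)
			bpf_trace_printk(fmt, sizeof(fmt), err);
		return 0;
	}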
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 7df436da542d..f1d74a2bd234 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -107,6 +107,10 @@ enum bpf_cmd {
BPF_MAP_LOOKUP_AND_DELETE_ELEM,
BPF_MAP_FREEZE,
BPF_BTF_GET_NEXT_ID,
+ BPF_MAP_LOOKUP_BATCH,
+ BPF_MAP_LOOKUP_AND_DELETE_BATCH,
+ BPF_MAP_UPDATE_BATCH,
+ BPF_MAP_DELETE_BATCH,
};
enum bpf_map_type {
@@ -136,6 +140,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_STACK,
BPF_MAP_TYPE_SK_STORAGE,
BPF_MAP_TYPE_DEVMAP_HASH,
+ BPF_MAP_TYPE_STRUCT_OPS,
};
/* Note that tracing related programs such as
@@ -174,6 +179,8 @@ enum bpf_prog_type {
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
BPF_PROG_TYPE_CGROUP_SOCKOPT,
BPF_PROG_TYPE_TRACING,
+ BPF_PROG_TYPE_STRUCT_OPS,
+ BPF_PROG_TYPE_EXT,
};
enum bpf_attach_type {
@@ -357,7 +364,12 @@ enum bpf_attach_type {
/* Enable memory-mapping BPF map */
#define BPF_F_MMAPABLE (1U << 10)
-/* flags for BPF_PROG_QUERY */
+/* Flags for BPF_PROG_QUERY. */
+
+/* Query effective (directly attached + inherited from ancestor cgroups)
+ * programs that will be executed for events within a cgroup.
+ * attach_flags with this flag are returned only for directly attached programs.
+ */
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
enum bpf_stack_build_id_status {
@@ -397,6 +409,10 @@ union bpf_attr {
__u32 btf_fd; /* fd pointing to a BTF type data */
__u32 btf_key_type_id; /* BTF type_id of the key */
__u32 btf_value_type_id; /* BTF type_id of the value */
+ __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
+ * struct stored as the
+ * map value
+ */
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -409,6 +425,23 @@ union bpf_attr {
__u64 flags;
};
+ struct { /* struct used by BPF_MAP_*_BATCH commands */
+ __aligned_u64 in_batch; /* start batch,
+ * NULL to start from beginning
+ */
+ __aligned_u64 out_batch; /* output: next start batch */
+ __aligned_u64 keys;
+ __aligned_u64 values;
+ __u32 count; /* input/output:
+ * input: # of key/value
+ * elements
+ * output: # of filled elements
+ */
+ __u32 map_fd;
+ __u64 elem_flags;
+ __u64 flags;
+ } batch;
+
struct { /* anonymous struct used by BPF_PROG_LOAD command */
__u32 prog_type; /* one of enum bpf_prog_type */
__u32 insn_cnt;
@@ -2703,7 +2736,8 @@ union bpf_attr {
*
* int bpf_send_signal(u32 sig)
* Description
- * Send signal *sig* to the current task.
+ * Send signal *sig* to the process of the current task.
+ * The signal may be delivered to any of this process's threads.
* Return
* 0 on success or successfully queued.
*
@@ -2831,6 +2865,33 @@ union bpf_attr {
* Return
* On success, the strictly positive length of the string, including
* the trailing NUL character. On error, a negative value.
+ *
+ * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
+ * Description
+ * Send out a tcp-ack. *tp* is the in-kernel struct tcp_sock.
+ * *rcv_nxt* is the ack_seq to be sent out.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_send_signal_thread(u32 sig)
+ * Description
+ * Send signal *sig* to the thread corresponding to the current task.
+ * Return
+ * 0 on success or successfully queued.
+ *
+ * **-EBUSY** if work queue under nmi is full.
+ *
+ * **-EINVAL** if *sig* is invalid.
+ *
+ * **-EPERM** if no permission to send the *sig*.
+ *
+ * **-EAGAIN** if bpf program can try again.
+ *
+ * u64 bpf_jiffies64(void)
+ * Description
+ * Obtain the 64bit jiffies
+ * Return
+ * The 64 bit jiffies
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2948,7 +3009,10 @@ union bpf_attr {
FN(probe_read_user), \
FN(probe_read_kernel), \
FN(probe_read_user_str), \
- FN(probe_read_kernel_str),
+ FN(probe_read_kernel_str), \
+ FN(tcp_send_ack), \
+ FN(send_signal_thread), \
+ FN(jiffies64),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -3349,7 +3413,7 @@ struct bpf_map_info {
__u32 map_flags;
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
- __u32 :32;
+ __u32 btf_vmlinux_value_type_id;
__u64 netns_dev;
__u64 netns_ino;
__u32 btf_id;
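[Editor's note] The four BPF_MAP_*_BATCH commands and the anonymous 'batch' struct in union bpf_attr above define the userspace ABI for batched map operations. A hedged userspace sketch of driving BPF_MAP_LOOKUP_BATCH through the raw bpf(2) syscall (error handling trimmed; the wrapper name and buffer sizing are assumptions):

	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/bpf.h>

	/* 'keys'/'values' must hold at least *count elements of the map's
	 * key/value size. On return *count is the number of elements
	 * actually filled, and out_batch is the cursor to feed back as
	 * in_batch to resume; the kernel signals completion with -ENOENT.
	 */
	static int example_map_lookup_batch(int map_fd, void *in_batch,
					    void *out_batch, void *keys,
					    void *values, __u32 *count)
	{
		union bpf_attr attr;
		int err;

		memset(&attr, 0, sizeof(attr));
		attr.batch.map_fd = map_fd;
		attr.batch.in_batch = (__u64)(unsigned long)in_batch;
		attr.batch.out_batch = (__u64)(unsigned long)out_batch;
		attr.batch.keys = (__u64)(unsigned long)keys;
		attr.batch.values = (__u64)(unsigned long)values;
		attr.batch.count = *count;

		err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH,
			      &attr, sizeof(attr));
		*count = attr.batch.count;
		return err;
	}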
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 1a2898c482ee..5a667107ad2c 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -146,6 +146,12 @@ enum {
BTF_VAR_GLOBAL_EXTERN = 2,
};
+enum btf_func_linkage {
+ BTF_FUNC_STATIC = 0,
+ BTF_FUNC_GLOBAL = 1,
+ BTF_FUNC_EXTERN = 2,
+};
+
/* BTF_KIND_VAR is followed by a single "struct btf_var" to describe
* additional information related to the variable such as its linkage.
*/
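[Editor's note] The btf_func_linkage enum distinguishes static from global BPF functions, which is what function-by-function verification keys on: a BTF_FUNC_GLOBAL function is verified once against its own BTF signature instead of being re-verified in every caller's context. A hedged sketch of BPF program C that yields the two linkages (exact toolchain behavior, e.g. clang emitting the linkage in the BTF_KIND_FUNC vlen bits, is an assumption):

	/* non-static + noinline: emitted with BTF_FUNC_GLOBAL linkage,
	 * verified independently against its prototype
	 */
	__attribute__((noinline))
	int global_helper(int x)
	{
		return x * 2;
	}

	/* static: BTF_FUNC_STATIC, verified inline at each call site */
	static int static_helper(int x)
	{
		return x * 2;
	}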