Diffstat (limited to 'include/linux/bpf-cgroup.h')
-rw-r--r--  include/linux/bpf-cgroup.h | 361
1 file changed, 231 insertions(+), 130 deletions(-)
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index a11d5b7dbbf3..57e9e109257e 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -3,11 +3,12 @@
#define _BPF_CGROUP_H
#include <linux/bpf.h>
+#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
-#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
+#include <net/sock.h>
#include <uapi/linux/bpf.h>
struct sock;
@@ -20,14 +21,56 @@ struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
+struct task_struct;
+
+unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
+ const struct bpf_insn *insn);
+unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
+ const struct bpf_insn *insn);
+unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
+ const struct bpf_insn *insn);
#ifdef CONFIG_CGROUP_BPF
-extern struct static_key_false cgroup_bpf_enabled_key;
-#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+#define CGROUP_ATYPE(type) \
+ case BPF_##type: return type
+
+static inline enum cgroup_bpf_attach_type
+to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
+{
+ switch (attach_type) {
+ CGROUP_ATYPE(CGROUP_INET_INGRESS);
+ CGROUP_ATYPE(CGROUP_INET_EGRESS);
+ CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
+ CGROUP_ATYPE(CGROUP_SOCK_OPS);
+ CGROUP_ATYPE(CGROUP_DEVICE);
+ CGROUP_ATYPE(CGROUP_INET4_BIND);
+ CGROUP_ATYPE(CGROUP_INET6_BIND);
+ CGROUP_ATYPE(CGROUP_INET4_CONNECT);
+ CGROUP_ATYPE(CGROUP_INET6_CONNECT);
+ CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
+ CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
+ CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
+ CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
+ CGROUP_ATYPE(CGROUP_SYSCTL);
+ CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
+ CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
+ CGROUP_ATYPE(CGROUP_GETSOCKOPT);
+ CGROUP_ATYPE(CGROUP_SETSOCKOPT);
+ CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
+ default:
+ return CGROUP_BPF_ATTACH_TYPE_INVALID;
+ }
+}
-DECLARE_PER_CPU(struct bpf_cgroup_storage*,
- bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
+#undef CGROUP_ATYPE
+
+extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
+#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
#define for_each_cgroup_storage_type(stype) \
for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
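/* Illustrative sketch only, not part of this diff: userspace-facing
 * bpf_attach_type values are translated into the dense cgroup-only enum
 * before being used as an index into the per-attach-type static keys.
 * A hypothetical helper guarding such a fast path (the "example_" name is
 * an assumption) might look like:
 */
static inline bool example_cgroup_hook_armed(enum bpf_attach_type type)
{
	enum cgroup_bpf_attach_type atype = to_cgroup_bpf_attach_type(type);

	if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
		return false;
	return cgroup_bpf_enabled(atype);
}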
@@ -36,7 +79,7 @@ struct bpf_cgroup_storage_map;
struct bpf_storage_buffer {
struct rcu_head rcu;
- char data[0];
+ char data[];
};
struct bpf_cgroup_storage {
@@ -46,85 +89,52 @@ struct bpf_cgroup_storage {
};
struct bpf_cgroup_storage_map *map;
struct bpf_cgroup_storage_key key;
- struct list_head list;
+ struct list_head list_map;
+ struct list_head list_cg;
struct rb_node node;
struct rcu_head rcu;
};
+struct bpf_cgroup_link {
+ struct bpf_link link;
+ struct cgroup *cgroup;
+ enum bpf_attach_type type;
+};
+
struct bpf_prog_list {
- struct list_head node;
+ struct hlist_node node;
struct bpf_prog *prog;
+ struct bpf_cgroup_link *link;
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};
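/* Illustrative sketch only, not part of this diff: with bpf_prog_list now on
 * an hlist, walking the programs attached for one attach type becomes an
 * hlist iteration. This assumes struct cgroup_bpf (moved to
 * linux/bpf-cgroup-defs.h) keeps the per-type list heads in ->progs[] and
 * that the caller serializes against attach/detach (e.g. holds cgroup_mutex).
 */
static inline u32 example_count_attached_progs(struct cgroup *cgrp,
					       enum cgroup_bpf_attach_type atype)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	hlist_for_each_entry(pl, &cgrp->bpf.progs[atype], node)
		if (pl->prog || pl->link)
			cnt++;
	return cnt;
}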
-struct bpf_prog_array;
-
-struct cgroup_bpf {
- /* array of effective progs in this cgroup */
- struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];
-
- /* attached progs to this cgroup and attach flags
- * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
- * have either zero or one element
- * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
- */
- struct list_head progs[MAX_BPF_ATTACH_TYPE];
- u32 flags[MAX_BPF_ATTACH_TYPE];
-
- /* temp storage for effective prog array used by prog_attach/detach */
- struct bpf_prog_array *inactive;
-
- /* reference counter used to detach bpf programs after cgroup removal */
- struct percpu_ref refcnt;
-
- /* cgroup_bpf is released using a work queue */
- struct work_struct release_work;
-};
-
int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);
-int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
- struct bpf_prog *replace_prog,
- enum bpf_attach_type type, u32 flags);
-int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type);
-int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
- union bpf_attr __user *uattr);
-
-/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
-int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
- struct bpf_prog *replace_prog, enum bpf_attach_type type,
- u32 flags);
-int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type, u32 flags);
-int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
- union bpf_attr __user *uattr);
-
int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sk(struct sock *sk,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
struct sockaddr *uaddr,
- enum bpf_attach_type type,
- void *t_ctx);
+ enum cgroup_bpf_attach_type atype,
+ void *t_ctx,
+ u32 *flags);
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct bpf_sock_ops_kern *sock_ops,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
- short access, enum bpf_attach_type type);
+ short access, enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
struct ctl_table *table, int write,
- void __user *buf, size_t *pcount,
- loff_t *ppos, void **new_buf,
- enum bpf_attach_type type);
+ char **buf, size_t *pcount, loff_t *ppos,
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
int *optname, char __user *optval,
@@ -134,6 +144,10 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
int __user *optlen, int max_optlen,
int retval);
+int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
+ int optname, void *optval,
+ int *optlen, int retval);
+
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
struct bpf_map *map)
{
@@ -143,15 +157,9 @@ static inline enum bpf_cgroup_storage_type cgroup_storage_type(
return BPF_CGROUP_STORAGE_SHARED;
}
-static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
- *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
-{
- enum bpf_cgroup_storage_type stype;
-
- for_each_cgroup_storage_type(stype)
- this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
-}
-
+struct bpf_cgroup_storage *
+cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
+ void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
@@ -160,19 +168,30 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
-void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map);
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
void *value, u64 flags);
+/* Opportunistic check to see whether we have any BPF program attached*/
+static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
+ enum cgroup_bpf_attach_type type)
+{
+ struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+ struct bpf_prog_array *array;
+
+ array = rcu_access_pointer(cgrp->bpf.effective[type]);
+ return array != &bpf_empty_prog_array.hdr;
+}
+
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \
+ cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
- BPF_CGROUP_INET_INGRESS); \
+ CGROUP_INET_INGRESS); \
\
__ret; \
})
@@ -180,119 +199,164 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
+ if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
typeof(sk) __sk = sk_to_full_sk(sk); \
- if (sk_fullsock(__sk)) \
+ if (sk_fullsock(__sk) && \
+ cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
- BPF_CGROUP_INET_EGRESS); \
+ CGROUP_INET_EGRESS); \
} \
__ret; \
})
-#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
+#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) { \
- __ret = __cgroup_bpf_run_filter_sk(sk, type); \
+ if (cgroup_bpf_enabled(atype)) { \
+ __ret = __cgroup_bpf_run_filter_sk(sk, atype); \
} \
__ret; \
})
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)
+
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
-#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
- NULL); \
+ if (cgroup_bpf_enabled(atype)) \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
+ NULL, NULL); \
__ret; \
})
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) { \
+ if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
- t_ctx); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
+ t_ctx, NULL); \
release_sock(sk); \
} \
__ret; \
})
-#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)
-
-#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)
+/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
+ * via upper bits of return code. The only flag that is supported
+ * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
+ * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
+ */
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \
+({ \
+ u32 __flags = 0; \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(atype)) { \
+ lock_sock(sk); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
+ NULL, &__flags); \
+ release_sock(sk); \
+ if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
+ *bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
+ } \
+ __ret; \
+})
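/* Illustrative sketch only, not part of this diff: a bind() implementation in
 * the style of __inet_bind() consumes the returned flag roughly as below
 * (simplified; the real caller also checks whether the requested port is
 * privileged at all). The "example_" name is an assumption.
 */
static inline int example_check_bind(struct sock *sk, struct sockaddr *uaddr)
{
	u32 bind_flags = 0;
	int err;

	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, CGROUP_INET4_BIND,
						 &bind_flags);
	if (err)
		return err;

	if (!(bind_flags & BIND_NO_CAP_NET_BIND_SERVICE) &&
	    !ns_capable(sock_net(sk)->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* ... proceed with the actual bind ... */
	return 0;
}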
-#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
- sk->sk_prot->pre_connect)
+#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
+ ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \
+ cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
+ (sk)->sk_prot->pre_connect)
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)
+
+/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
+ * fullsock and its parent fullsock cannot be traced by
+ * sk_to_full_sk().
+ *
+ * e.g. sock_ops->sk is a request_sock and it is under syncookie mode.
+ * Its listener-sk is not attached to the rsk_listener.
+ * In this case, the caller holds the listener-sk (unlocked),
+ * set its sock_ops->sk to req_sk, and call this SOCK_OPS"_SK" with
+ * the listener-sk such that the cgroup-bpf-progs of the
+ * listener-sk will be run.
+ *
+ * Regardless of syncookie mode or not,
+ * calling bpf_setsockopt on listener-sk will not make sense anyway,
+ * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
+ */
+#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \
+ __ret = __cgroup_bpf_run_filter_sock_ops(sk, \
+ sock_ops, \
+ CGROUP_SOCK_OPS); \
+ __ret; \
+})
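/* Illustrative sketch only, not part of this diff: a syncookie-mode caller
 * that already holds the (unlocked) listener exposes the req_sk to the
 * program via sock_ops->sk while running the listener's cgroup programs.
 * The "example_" wrapper and its arguments are assumptions.
 */
static inline int example_run_skops_for_req(struct bpf_sock_ops_kern *sock_ops,
					    struct request_sock *req_sk,
					    struct sock *listener_sk)
{
	sock_ops->sk = req_to_sk(req_sk);	/* not a fullsock */
	return BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, listener_sk);
}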
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && (sock_ops)->sk) { \
+ if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
if (__sk && sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
sock_ops, \
- BPF_CGROUP_SOCK_OPS); \
+ CGROUP_SOCK_OPS); \
} \
__ret; \
})
-#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
- __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
+ if (cgroup_bpf_enabled(CGROUP_DEVICE)) \
+ __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
access, \
- BPF_CGROUP_DEVICE); \
+ CGROUP_DEVICE); \
\
__ret; \
})
-#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \
+#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \
__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
- buf, count, pos, nbuf, \
- BPF_CGROUP_SYSCTL); \
+ buf, count, pos, \
+ CGROUP_SYSCTL); \
__ret; \
})
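/* Illustrative sketch only, not part of this diff: the sysctl hook now takes
 * the kernel buffer by address (char **) so it can substitute a rewritten
 * value on write, instead of returning one through the old new_buf parameter.
 * A proc_sys-style caller therefore keeps using (and later freeing) whatever
 * pointer is left behind; all names below are assumptions.
 */
static inline ssize_t example_proc_sys_call(struct ctl_table_header *head,
					    struct ctl_table *table, int write,
					    size_t count, loff_t *ppos)
{
	char *kbuf = kvzalloc(count + 1, GFP_KERNEL);
	int error;

	if (!kbuf)
		return -ENOMEM;
	/* on write: the user-supplied value would be copied into kbuf here */
	error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, &kbuf,
					   &count, ppos);
	if (error)
		goto out;
	/* ... perform the actual sysctl read/write using kbuf ... */
out:
	kvfree(kbuf);
	return error ? error : count;
}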
@@ -300,7 +364,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
kernel_optval) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) && \
+ cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT)) \
__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
optname, optval, \
optlen, \
@@ -311,7 +376,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
get_user(__ret, optlen); \
__ret; \
})
@@ -320,11 +385,25 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
max_optlen, retval) \
({ \
int __ret = retval; \
- if (cgroup_bpf_enabled) \
- __ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \
- optname, optval, \
- optlen, max_optlen, \
- retval); \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) && \
+ cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT)) \
+ if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
+ !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
+ tcp_bpf_bypass_getsockopt, \
+ level, optname)) \
+ __ret = __cgroup_bpf_run_filter_getsockopt( \
+ sock, level, optname, optval, optlen, \
+ max_optlen, retval); \
+ __ret; \
+})
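/* Illustrative sketch only, not part of this diff: a protocol can exempt
 * selected options from the cgroup getsockopt hook by providing
 * ->bpf_bypass_getsockopt in its struct proto; the indirect call above wires
 * this up for TCP, whose callback effectively does the following for
 * TCP_ZEROCOPY_RECEIVE:
 */
static inline bool example_bpf_bypass_getsockopt(int level, int optname)
{
	return level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE;
}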
+
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+ optlen, retval) \
+({ \
+ int __ret = retval; \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
+ __ret = __cgroup_bpf_run_filter_getsockopt_kern( \
+ sock, level, optname, optval, optlen, retval); \
__ret; \
})
@@ -332,12 +411,16 @@ int cgroup_bpf_prog_attach(const union bpf_attr *attr,
enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
enum bpf_prog_type ptype);
+int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr);
+
+const struct bpf_func_proto *
+cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
+const struct bpf_func_proto *
+cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
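/* Illustrative note, not part of this diff: cgroup_bpf_link_attach() is the
 * kernel side of BPF_LINK_CREATE for cgroup programs. From userspace, with
 * libbpf, a link-based attach is roughly (prog_fd/cgroup_fd assumed valid):
 *
 *	int link_fd = bpf_link_create(prog_fd, cgroup_fd,
 *				      BPF_CGROUP_INET_EGRESS, NULL);
 *
 * The program then stays attached until the link is detached or its last fd
 * is closed, rather than requiring an explicit BPF_PROG_DETACH.
 */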
#else
-struct bpf_prog;
-struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}
@@ -354,18 +437,32 @@ static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
return -EINVAL;
}
+static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
return -EINVAL;
}
-static inline void bpf_cgroup_storage_set(
- struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
+static inline const struct bpf_func_proto *
+cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ return NULL;
+}
+
+static inline const struct bpf_func_proto *
+cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ return NULL;
+}
+
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
struct bpf_map *map) { return 0; }
-static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux,
- struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
@@ -379,13 +476,15 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
return 0;
}
-#define cgroup_bpf_enabled (0)
+#define cgroup_bpf_enabled(atype) (0)
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
@@ -397,11 +496,13 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
optlen, max_optlen, retval) ({ retval; })
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+ optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
kernel_optval) ({ 0; })