path: root/include/linux/skmsg.h
author    Daniel Borkmann <daniel@iogearbox.net> 2018-10-13 02:45:58 +0200
committer Alexei Starovoitov <ast@kernel.org> 2018-10-15 12:23:19 -0700
commit    604326b41a6fb9b4a78b6179335decee0365cd8c (patch)
tree      95d439c3739f0b3ed5022780cd3f6925f1a4f94d /include/linux/skmsg.h
parent    tcp, ulp: remove ulp bits from sockmap (diff)
bpf, sockmap: convert to generic sk_msg interface
Add a generic sk_msg layer, and convert the current sockmap and later kTLS over to make use of it. While sk_buff handles network packet representation from the netdevice up to the socket, sk_msg handles data representation from the application down to the socket layer. This means the sk_msg framework spans ULP users in the kernel and enables features such as introspection or filtering of data with the help of BPF programs that operate on this data structure. The latter becomes particularly useful for kTLS, where data encryption is deferred into the kernel, enabling the kernel to perform L7 introspection and BPF-based policy for TLS connections, with the record encrypted only after BPF has run and come to a verdict.

To get there, the first step is to transform the open-coded scatter-gather list handling into a common core framework that subsystems can use. The code has been split and refactored into three bigger pieces: i) the generic sk_msg API, which manages the scatter-gather ring, provides helpers for walking and mangling it, transfers application data from user space into it, and prepares it for BPF pre/post-processing; ii) the plain sock map itself, where sockets can be attached to or detached from; these bits are independent of i), which can now also be used without sock map; and iii) the integration with plain TCP as one protocol used for processing L7 application data (later this could, e.g., also be extended to other protocols like UDP).

The semantics are the same as with the old sock map code, so there is no change in user-facing behavior or APIs. Pursuing this work also helped uncover a number of bugs in the old sockmap code that we have already fixed in earlier commits. The test_sockmap kselftest suite passes fine as well.

Joint work with John.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
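As a rough illustration (not part of this patch) of the verdict semantics the sk_msg layer feeds, a minimal BPF_PROG_TYPE_SK_MSG program, attached to a sock map with the BPF_SK_MSG_VERDICT attach type, might look like the sketch below; the program name and the toy policy are hypothetical, and a libbpf-style build is assumed:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("sk_msg")
    int msg_verdict(struct sk_msg_md *msg)
    {
            __u8 *data = msg->data;

            /* Verifier-mandated bounds check before touching the data;
             * data/data_end expose the first scatter-gather element.
             */
            if (data + 1 > (__u8 *)msg->data_end)
                    return SK_PASS;

            /* Toy policy: drop messages whose first byte is 0xff. */
            return data[0] == 0xff ? SK_DROP : SK_PASS;
    }

    char _license[] SEC("license") = "GPL";

The kernel maps SK_PASS/SK_DROP onto the internal __SK_* verdicts declared below; redirects additionally use the bpf_msg_redirect_map() helper.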
Diffstat (limited to 'include/linux/skmsg.h')
-rw-r--r--  include/linux/skmsg.h | 371
1 file changed, 371 insertions(+), 0 deletions(-)
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
new file mode 100644
index 000000000000..95678103c4a0
--- /dev/null
+++ b/include/linux/skmsg.h
@@ -0,0 +1,371 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
+
+#ifndef _LINUX_SKMSG_H
+#define _LINUX_SKMSG_H
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/strparser.h>
+
+#define MAX_MSG_FRAGS MAX_SKB_FRAGS
+
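+/*
+ * Kernel-internal verdicts computed by running the attached BPF
+ * program on a message: drop it, pass it up, or redirect it to
+ * another socket. __SK_NONE means no verdict has been computed yet.
+ */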
+enum __sk_action {
+ __SK_DROP = 0,
+ __SK_PASS,
+ __SK_REDIRECT,
+ __SK_NONE,
+};
+
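+/*
+ * Scatter-gather ring of up to MAX_MSG_FRAGS elements. start, curr
+ * and end are ring indices; size is the total byte count held in the
+ * ring, and copy[i] marks elements whose pages must be copied rather
+ * than shared before being handed onwards.
+ */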
+struct sk_msg_sg {
+ u32 start;
+ u32 curr;
+ u32 end;
+ u32 size;
+ u32 copybreak;
+ bool copy[MAX_MSG_FRAGS];
+ struct scatterlist data[MAX_MSG_FRAGS];
+};
+
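+/*
+ * A single in-flight message. data/data_end delimit the window a BPF
+ * program may access directly (see sk_msg_compute_data_pointers());
+ * apply_bytes and cork_bytes carry the state set via the
+ * bpf_msg_apply_bytes() and bpf_msg_cork_bytes() helpers.
+ */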
+struct sk_msg {
+ struct sk_msg_sg sg;
+ void *data;
+ void *data_end;
+ u32 apply_bytes;
+ u32 cork_bytes;
+ u32 flags;
+ struct sk_buff *skb;
+ struct sock *sk_redir;
+ struct sock *sk;
+ struct list_head list;
+};
+
+struct sk_psock_progs {
+ struct bpf_prog *msg_parser;
+ struct bpf_prog *skb_parser;
+ struct bpf_prog *skb_verdict;
+};
+
+enum sk_psock_state_bits {
+ SK_PSOCK_TX_ENABLED,
+};
+
+struct sk_psock_link {
+ struct list_head list;
+ struct bpf_map *map;
+ void *link_raw;
+};
+
+struct sk_psock_parser {
+ struct strparser strp;
+ bool enabled;
+ void (*saved_data_ready)(struct sock *sk);
+};
+
+struct sk_psock_work_state {
+ struct sk_buff *skb;
+ u32 len;
+ u32 off;
+};
+
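+/*
+ * Per-socket psock state, attached through sk_user_data. The saved_*
+ * callbacks and sk_proto preserve the original socket callbacks so
+ * they can be restored when the psock is detached.
+ */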
+struct sk_psock {
+ struct sock *sk;
+ struct sock *sk_redir;
+ u32 apply_bytes;
+ u32 cork_bytes;
+ u32 eval;
+ struct sk_msg *cork;
+ struct sk_psock_progs progs;
+ struct sk_psock_parser parser;
+ struct sk_buff_head ingress_skb;
+ struct list_head ingress_msg;
+ unsigned long state;
+ struct list_head link;
+ spinlock_t link_lock;
+ refcount_t refcnt;
+ void (*saved_unhash)(struct sock *sk);
+ void (*saved_close)(struct sock *sk, long timeout);
+ void (*saved_write_space)(struct sock *sk);
+ struct proto *sk_proto;
+ struct sk_psock_work_state work_state;
+ struct work_struct work;
+ union {
+ struct rcu_head rcu;
+ struct work_struct gc;
+ };
+};
+
+int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
+ int elem_first_coalesce);
+void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
+int sk_msg_free(struct sock *sk, struct sk_msg *msg);
+int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
+void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
+void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
+ u32 bytes);
+
+void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
+
+int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
+ struct sk_msg *msg, u32 bytes);
+int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
+ struct sk_msg *msg, u32 bytes);
+
+static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
+{
+ WARN_ON(i == msg->sg.end && bytes);
+}
+
+static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
+{
+ if (psock->apply_bytes) {
+ if (psock->apply_bytes < bytes)
+ psock->apply_bytes = 0;
+ else
+ psock->apply_bytes -= bytes;
+ }
+}
+
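+/* Ring-index arithmetic, wrapping modulo MAX_MSG_FRAGS. */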
+#define sk_msg_iter_var_prev(var) \
+ do { \
+ if (var == 0) \
+ var = MAX_MSG_FRAGS - 1; \
+ else \
+ var--; \
+ } while (0)
+
+#define sk_msg_iter_var_next(var) \
+ do { \
+ var++; \
+ if (var == MAX_MSG_FRAGS) \
+ var = 0; \
+ } while (0)
+
+#define sk_msg_iter_prev(msg, which) \
+ sk_msg_iter_var_prev(msg->sg.which)
+
+#define sk_msg_iter_next(msg, which) \
+ sk_msg_iter_var_next(msg->sg.which)
+
+static inline void sk_msg_clear_meta(struct sk_msg *msg)
+{
+ memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy));
+}
+
+static inline void sk_msg_init(struct sk_msg *msg)
+{
+ memset(msg, 0, sizeof(*msg));
+ sg_init_marker(msg->sg.data, ARRAY_SIZE(msg->sg.data));
+}
+
+static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
+ int which, u32 size)
+{
+ dst->sg.data[which] = src->sg.data[which];
+ dst->sg.data[which].length = size;
+ src->sg.data[which].length -= size;
+ src->sg.data[which].offset += size;
+}
+
+static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
+{
+ return msg->sg.end >= msg->sg.start ?
+ msg->sg.end - msg->sg.start :
+ msg->sg.end + (MAX_MSG_FRAGS - msg->sg.start);
+}
+
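+/*
+ * start == end is ambiguous on a ring: with size == 0 the ring is
+ * empty, with size != 0 it is full.
+ */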
+static inline bool sk_msg_full(const struct sk_msg *msg)
+{
+ return (msg->sg.end == msg->sg.start) && msg->sg.size;
+}
+
+static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
+{
+ return &msg->sg.data[which];
+}
+
+static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
+{
+ return sg_page(sk_msg_elem(msg, which));
+}
+
+static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
+{
+ return msg->flags & BPF_F_INGRESS;
+}
+
+static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
+{
+ struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);
+
+ if (msg->sg.copy[msg->sg.start]) {
+ msg->data = NULL;
+ msg->data_end = NULL;
+ } else {
+ msg->data = sg_virt(sge);
+ msg->data_end = msg->data + sge->length;
+ }
+}
+
+static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
+ u32 len, u32 offset)
+{
+ struct scatterlist *sge;
+
+ get_page(page);
+ sge = sk_msg_elem(msg, msg->sg.end);
+ sg_set_page(sge, page, len, offset);
+ sg_unmark_end(sge);
+
+ msg->sg.copy[msg->sg.end] = true;
+ msg->sg.size += len;
+ sk_msg_iter_next(msg, end);
+}
+
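+/* Callers must hold rcu_read_lock(); the psock is RCU-protected. */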
+static inline struct sk_psock *sk_psock(const struct sock *sk)
+{
+ return rcu_dereference_sk_user_data(sk);
+}
+
+static inline bool sk_has_psock(struct sock *sk)
+{
+ return sk_psock(sk) != NULL && sk->sk_prot->recvmsg == tcp_bpf_recvmsg;
+}
+
+static inline void sk_psock_queue_msg(struct sk_psock *psock,
+ struct sk_msg *msg)
+{
+ list_add_tail(&msg->list, &psock->ingress_msg);
+}
+
+static inline void sk_psock_report_error(struct sk_psock *psock, int err)
+{
+ struct sock *sk = psock->sk;
+
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
+}
+
+struct sk_psock *sk_psock_init(struct sock *sk, int node);
+
+int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
+void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
+void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
+
+int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
+ struct sk_msg *msg);
+
+static inline struct sk_psock_link *sk_psock_init_link(void)
+{
+ return kzalloc(sizeof(struct sk_psock_link),
+ GFP_ATOMIC | __GFP_NOWARN);
+}
+
+static inline void sk_psock_free_link(struct sk_psock_link *link)
+{
+ kfree(link);
+}
+
+struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);
+#if defined(CONFIG_BPF_STREAM_PARSER)
+void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link);
+#else
+static inline void sk_psock_unlink(struct sock *sk,
+ struct sk_psock_link *link)
+{
+}
+#endif
+
+void __sk_psock_purge_ingress_msg(struct sk_psock *psock);
+
+static inline void sk_psock_cork_free(struct sk_psock *psock)
+{
+ if (psock->cork) {
+ sk_msg_free(psock->sk, psock->cork);
+ kfree(psock->cork);
+ psock->cork = NULL;
+ }
+}
+
+static inline void sk_psock_update_proto(struct sock *sk,
+ struct sk_psock *psock,
+ struct proto *ops)
+{
+ psock->saved_unhash = sk->sk_prot->unhash;
+ psock->saved_close = sk->sk_prot->close;
+ psock->saved_write_space = sk->sk_write_space;
+
+ psock->sk_proto = sk->sk_prot;
+ sk->sk_prot = ops;
+}
+
+static inline void sk_psock_restore_proto(struct sock *sk,
+ struct sk_psock *psock)
+{
+ if (psock->sk_proto) {
+ sk->sk_prot = psock->sk_proto;
+ psock->sk_proto = NULL;
+ }
+}
+
+static inline void sk_psock_set_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ set_bit(bit, &psock->state);
+}
+
+static inline void sk_psock_clear_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ clear_bit(bit, &psock->state);
+}
+
+static inline bool sk_psock_test_state(const struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ return test_bit(bit, &psock->state);
+}
+
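+/*
+ * refcount_inc_not_zero() guards against racing with the final put:
+ * a psock found under RCU whose refcount already dropped to zero is
+ * treated as gone.
+ */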
+static inline struct sk_psock *sk_psock_get(struct sock *sk)
+{
+ struct sk_psock *psock;
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (psock && !refcount_inc_not_zero(&psock->refcnt))
+ psock = NULL;
+ rcu_read_unlock();
+ return psock;
+}
+
+void sk_psock_stop(struct sock *sk, struct sk_psock *psock);
+void sk_psock_destroy(struct rcu_head *rcu);
+void sk_psock_drop(struct sock *sk, struct sk_psock *psock);
+
+static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
+{
+ if (refcount_dec_and_test(&psock->refcnt))
+ sk_psock_drop(sk, psock);
+}
+
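+/*
+ * xchg() swaps in the new program atomically so the reference on the
+ * old program is dropped exactly once.
+ */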
+static inline void psock_set_prog(struct bpf_prog **pprog,
+ struct bpf_prog *prog)
+{
+ prog = xchg(pprog, prog);
+ if (prog)
+ bpf_prog_put(prog);
+}
+
+static inline void psock_progs_drop(struct sk_psock_progs *progs)
+{
+ psock_set_prog(&progs->msg_parser, NULL);
+ psock_set_prog(&progs->skb_parser, NULL);
+ psock_set_prog(&progs->skb_verdict, NULL);
+}
+
+#endif /* _LINUX_SKMSG_H */