Diffstat (limited to 'include/net/tls.h')
 include/net/tls.h | 215 ++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 166 insertions(+), 49 deletions(-)
diff --git a/include/net/tls.h b/include/net/tls.h
index 53d96bca220d..c664e6dba0d1 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -40,6 +40,8 @@
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
+#include <linux/netdevice.h>
+#include <linux/rcupdate.h>
#include <net/tcp.h>
#include <net/strparser.h>
@@ -61,6 +63,7 @@
#define TLS_DEVICE_NAME_MAX 32
#define MAX_IV_SIZE 16
+#define TLS_MAX_REC_SEQ_SIZE 8
/* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes.
*
@@ -105,9 +108,7 @@ struct tls_device {
enum {
TLS_BASE,
TLS_SW,
-#ifdef CONFIG_TLS_DEVICE
TLS_HW,
-#endif
TLS_HW_RECORD,
TLS_NUM_CONFIG,
};
@@ -160,6 +161,7 @@ struct tls_sw_context_tx {
int async_capable;
#define BIT_TX_SCHEDULED 0
+#define BIT_TX_CLOSING 1
unsigned long tx_bitmask;
};
@@ -197,20 +199,24 @@ struct tls_offload_context_tx {
struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
void (*sk_destruct)(struct sock *sk);
- u8 driver_state[];
+ u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
* driver specific state to justify another layer of indirection
*/
-#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
+#define TLS_DRIVER_STATE_SIZE_TX 16
};
#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
- (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
- TLS_DRIVER_STATE_SIZE)
+ (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
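
The driver_state flexible array above is how a device driver keeps its
per-connection TX state inside the TLS context without a separate
allocation. A minimal sketch of how a hypothetical driver might use it
(the struct and function names are illustrative, not from this patch;
tls_driver_ctx() is the accessor added further down in this header):

struct mydrv_tls_tx_state {
	u32 expected_tcp_seq;	/* next TCP seq the HW expects to see */
	u32 hw_flow_id;		/* HW crypto/steering context handle */
};

static inline struct mydrv_tls_tx_state *mydrv_tx_state(struct sock *sk)
{
	/* must fit in the 16 bytes reserved by TLS_DRIVER_STATE_SIZE_TX */
	BUILD_BUG_ON(sizeof(struct mydrv_tls_tx_state) >
		     TLS_DRIVER_STATE_SIZE_TX);
	return tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_TX);
}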
enum tls_context_flags {
TLS_RX_SYNC_RUNNING = 0,
+ /* Unlike RX, where resync is driven entirely by the core, in TX only
+ * the driver knows when things went out of sync, so we need the flag
+ * to be atomic.
+ */
+ TLS_TX_SYNC_SCHED = 1,
};
struct cipher_context {
@@ -240,45 +246,42 @@ struct tls_prot_info {
};
struct tls_context {
+ /* read-only cache line */
struct tls_prot_info prot_info;
- union tls_crypto_context crypto_send;
- union tls_crypto_context crypto_recv;
+ u8 tx_conf:3;
+ u8 rx_conf:3;
- struct list_head list;
- struct net_device *netdev;
- refcount_t refcount;
+ int (*push_pending_record)(struct sock *sk, int flags);
+ void (*sk_write_space)(struct sock *sk);
void *priv_ctx_tx;
void *priv_ctx_rx;
- u8 tx_conf:3;
- u8 rx_conf:3;
+ struct net_device *netdev;
+ /* rw cache line */
struct cipher_context tx;
struct cipher_context rx;
struct scatterlist *partially_sent_record;
u16 partially_sent_offset;
- unsigned long flags;
bool in_tcp_sendpages;
bool pending_open_record_frags;
+ unsigned long flags;
- int (*push_pending_record)(struct sock *sk, int flags);
+ /* cache cold stuff */
+ struct proto *sk_proto;
- void (*sk_write_space)(struct sock *sk);
void (*sk_destruct)(struct sock *sk);
- void (*sk_proto_close)(struct sock *sk, long timeout);
-
- int (*setsockopt)(struct sock *sk, int level,
- int optname, char __user *optval,
- unsigned int optlen);
- int (*getsockopt)(struct sock *sk, int level,
- int optname, char __user *optval,
- int __user *optlen);
- int (*hash)(struct sock *sk);
- void (*unhash)(struct sock *sk);
+
+ union tls_crypto_context crypto_send;
+ union tls_crypto_context crypto_recv;
+
+ struct list_head list;
+ refcount_t refcount;
+ struct rcu_head rcu;
};
enum tls_offload_ctx_dir {
@@ -294,25 +297,50 @@ struct tlsdev_ops {
void (*tls_dev_del)(struct net_device *netdev,
struct tls_context *ctx,
enum tls_offload_ctx_dir direction);
- void (*tls_dev_resync_rx)(struct net_device *netdev,
- struct sock *sk, u32 seq, u64 rcd_sn);
+ int (*tls_dev_resync)(struct net_device *netdev,
+ struct sock *sk, u32 seq, u8 *rcd_sn,
+ enum tls_offload_ctx_dir direction);
+};
+
+enum tls_offload_sync_type {
+ TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
+ TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
};
+#define TLS_DEVICE_RESYNC_NH_START_IVAL 2
+#define TLS_DEVICE_RESYNC_NH_MAX_IVAL 128
+
struct tls_offload_context_rx {
/* sw must be the first member of tls_offload_context_rx */
struct tls_sw_context_rx sw;
- atomic64_t resync_req;
- u8 driver_state[];
+ enum tls_offload_sync_type resync_type;
+ /* this member is set regardless of resync_type, to avoid branches */
+ u8 resync_nh_reset:1;
+ /* CORE_NEXT_HINT-only member, but use the hole here */
+ u8 resync_nh_do_now:1;
+ union {
+ /* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
+ struct {
+ atomic64_t resync_req;
+ };
+ /* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
+ struct {
+ u32 decrypted_failed;
+ u32 decrypted_tgt;
+ } resync_nh;
+ };
+ u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
* driver specific state to justify another layer of indirection
*/
+#define TLS_DRIVER_STATE_SIZE_RX 8
};
#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
- (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
- TLS_DRIVER_STATE_SIZE)
+ (sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
+void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
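
With tls_dev_resync_rx() replaced by the direction-aware tls_dev_resync()
above, one callback now serves both paths, and the record sequence number
is passed as a byte array (up to TLS_MAX_REC_SEQ_SIZE bytes) rather than
a u64. A hedged sketch of what a driver implementation might look like
(all mydrv_* names are hypothetical):

static int mydrv_tls_dev_resync(struct net_device *netdev,
				struct sock *sk, u32 seq, u8 *rcd_sn,
				enum tls_offload_ctx_dir direction)
{
	struct mydrv_priv *priv = netdev_priv(netdev);

	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return mydrv_resync_tx(priv, sk, seq, rcd_sn);
	return mydrv_resync_rx(priv, sk, seq, rcd_sn);
}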
@@ -320,13 +348,17 @@ int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
unsigned int optlen);
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
+void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
+void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
-void tls_sw_close(struct sock *sk, long timeout);
-void tls_sw_free_resources_tx(struct sock *sk);
+void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
+void tls_sw_release_resources_tx(struct sock *sk);
+void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
+void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len);
bool tls_sw_stream_read(const struct sock *sk);
@@ -334,13 +366,9 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
-int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
-void tls_device_free_resources_tx(struct sock *sk);
-void tls_device_init(void);
-void tls_device_cleanup(void);
int tls_tx_records(struct sock *sk, int flags);
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
@@ -389,6 +417,23 @@ static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
return READ_ONCE(rec->tx_ready);
}
+static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
+{
+ u16 config = tx ? ctx->tx_conf : ctx->rx_conf;
+
+ switch (config) {
+ case TLS_BASE:
+ return TLS_CONF_BASE;
+ case TLS_SW:
+ return TLS_CONF_SW;
+ case TLS_HW:
+ return TLS_CONF_HW;
+ case TLS_HW_RECORD:
+ return TLS_CONF_HW_RECORD;
+ }
+ return 0;
+}
+
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
struct sk_buff *skb);
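
tls_user_config() maps the kernel-internal TLS_* configuration values onto
the TLS_CONF_* constants exposed to userspace. A sketch of a caller in
socket-diag style reporting code (assuming the TLS_INFO_* netlink
attributes from the uapi TLS header; purely illustrative):

static int report_tls_conf(struct sock *sk, struct sk_buff *nlskb)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return 0;
	if (nla_put_u16(nlskb, TLS_INFO_TXCONF, tls_user_config(ctx, true)) ||
	    nla_put_u16(nlskb, TLS_INFO_RXCONF, tls_user_config(ctx, false)))
		return -EMSGSIZE;
	return 0;
}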
@@ -427,23 +472,22 @@ static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
- return icsk->icsk_ulp_data;
+ /* Use RCU on icsk_ulp_data only for sock diag code;
+ * the TLS data path doesn't need rcu_dereference().
+ */
+ return (__force void *)icsk->icsk_ulp_data;
}
static inline void tls_advance_record_sn(struct sock *sk,
- struct cipher_context *ctx,
- int version)
+ struct tls_prot_info *prot,
+ struct cipher_context *ctx)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_prot_info *prot = &tls_ctx->prot_info;
-
if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
tls_err_abort(sk, EBADMSG);
- if (version != TLS_1_3_VERSION) {
+ if (prot->version != TLS_1_3_VERSION)
tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
prot->iv_size);
- }
}
static inline void tls_fill_prepend(struct tls_context *ctx,
@@ -545,6 +589,23 @@ tls_offload_ctx_rx(const struct tls_context *tls_ctx)
return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ return tls_offload_ctx_tx(tls_ctx)->driver_state;
+ else
+ return tls_offload_ctx_rx(tls_ctx)->driver_state;
+}
+
+static inline void *
+tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
+{
+ return __tls_driver_ctx(tls_get_ctx(sk), direction);
+}
+#endif
+
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
@@ -554,14 +615,39 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
}
+static inline void
+tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ tls_offload_ctx_rx(tls_ctx)->resync_type = type;
+}
+
+static inline void tls_offload_tx_resync_request(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
+}
+
+/* The driver's seq tracking must stay disabled until the resync succeeds */
+static inline bool tls_offload_tx_resync_pending(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ bool ret;
+
+ ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
+ smp_mb__after_atomic();
+ return ret;
+}
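
In a driver's transmit path the two helpers above pair up as follows: the
driver requests a resync when it sees a TCP sequence number its HW state
doesn't match, and until the core completes the resync (clearing
TLS_TX_SYNC_SCHED through the ->tls_dev_resync() callback) it must not
trust its own sequence tracking and falls back to SW crypto. A
hypothetical sketch, reusing the mydrv_tls_tx_state sketched earlier:

static bool mydrv_tls_tx_can_offload(struct sock *sk, u32 tcp_seq)
{
	struct mydrv_tls_tx_state *st = mydrv_tx_state(sk);

	if (unlikely(tcp_seq != st->expected_tcp_seq)) {
		/* retransmission or OOO segment: HW state is stale;
		 * don't re-request while a resync is already scheduled
		 */
		if (!tls_offload_tx_resync_pending(sk))
			tls_offload_tx_resync_request(sk);
		return false;
	}
	return !tls_offload_tx_resync_pending(sk);
}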
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
unsigned char *record_type);
void tls_register_device(struct tls_device *device);
void tls_unregister_device(struct tls_device *device);
-int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
struct scatterlist *sgout);
+struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
struct net_device *dev,
@@ -571,9 +657,40 @@ int tls_sw_fallback_init(struct sock *sk,
struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info);
+#ifdef CONFIG_TLS_DEVICE
+void tls_device_init(void);
+void tls_device_cleanup(void);
+int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
+void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
-
void tls_device_offload_cleanup_rx(struct sock *sk);
-void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
+void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
+int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
+#else
+static inline void tls_device_init(void) {}
+static inline void tls_device_cleanup(void) {}
+static inline int
+tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void tls_device_free_resources_tx(struct sock *sk) {}
+
+static inline int
+tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
+static inline void
+tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}
+
+static inline int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
+{
+ return 0;
+}
+#endif
#endif /* _TLS_OFFLOAD_H */
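
Finally, a hypothetical tls_dev_add() showing where a driver would opt
into the new CORE_NEXT_HINT resync flavour: a device that cannot parse
TLS record headers on its own lets the core hint at where the next
record starts (all mydrv_* names are illustrative):

static int mydrv_tls_dev_add(struct net_device *netdev, struct sock *sk,
			     enum tls_offload_ctx_dir direction,
			     struct tls_crypto_info *crypto_info,
			     u32 start_offload_tcp_sn)
{
	struct mydrv_priv *priv = netdev_priv(netdev);
	int err;

	err = mydrv_setup_crypto_ctx(priv, sk, direction, crypto_info,
				     start_offload_tcp_sn);
	if (err)
		return err;

	if (direction == TLS_OFFLOAD_CTX_DIR_RX)
		tls_offload_rx_resync_set_type(sk,
				TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
	return 0;
}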