 Documentation/networking/tls-offload.rst |  19
 include/net/tls.h                        |  34
 net/tls/tls_device.c                     | 105
 net/tls/tls_sw.c                         |   2
 4 files changed, 145 insertions(+), 15 deletions(-)
diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst
index eb7c9b81ccf5..d134d63307e7 100644
--- a/Documentation/networking/tls-offload.rst
+++ b/Documentation/networking/tls-offload.rst
@@ -268,6 +268,9 @@ Device can only detect that segment 4 also contains a TLS header
if it knows the length of the previous record from segment 2. In this case
the device will lose synchronization with the stream.
+Stream scan resynchronization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
When the device gets out of sync and the stream reaches TCP sequence
numbers more than a max size record past the expected TCP sequence number,
the device starts scanning for a known header pattern. For example
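
The header pattern being scanned for is the start of a TLS record header.
A rough standalone sketch of the check such hardware performs is below; it
is illustrative only (real devices also validate the length field against
where the next header would land, which this check omits):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* A TLS 1.2 application data record starts with:
 *   byte 0:    0x17 (ContentType application_data)
 *   bytes 1-2: 0x03 0x03 (legacy record version for TLS 1.2)
 * followed by a 2-byte big-endian payload length.
 */
static bool tls12_app_data_header(const uint8_t *p, size_t len)
{
	return len >= 5 && p[0] == 0x17 && p[1] == 0x03 && p[2] == 0x03;
}

/* Scan a stream for a candidate header; a real device would then ask
 * the stack to confirm the guess at the matching TCP sequence number.
 */
static long scan_for_header(const uint8_t *buf, size_t len)
{
	size_t off;

	for (off = 0; off + 5 <= len; off++)
		if (tls12_app_data_header(buf + off, len - off))
			return (long)off;
	return -1;
}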
@@ -298,6 +301,22 @@ Special care has to be taken if the confirmation request is passed
asynchronously to the packet stream and record may get processed
by the kernel before the confirmation request.
+Stack-driven resynchronization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The driver may also ask the stack to perform resynchronization
+whenever it notices that records are no longer being decrypted.
+If the connection is configured in this mode, the stack automatically
+schedules a resynchronization after it has received two completely
+encrypted records.
+
+The stack waits for the socket to drain and informs the device about
+the next expected record number and its TCP sequence number. If records
+continue to arrive fully encrypted, the stack retries the
+synchronization with an exponential back off (first after 2 encrypted
+records, then after 4, after 8, after 16, and so on, capping at every
+128 records).
+
Error handling
==============
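
The exponential back off described in the hunk above works out as in this
small standalone sketch; the constants mirror TLS_DEVICE_RESYNC_NH_START_IVAL
and TLS_DEVICE_RESYNC_NH_MAX_IVAL added to include/net/tls.h below, but the
helper itself is illustrative, not kernel code:

#include <stdio.h>

#define RESYNC_NH_START_IVAL	2	/* first attempt after 2 encrypted records */
#define RESYNC_NH_MAX_IVAL	128	/* steady state: every 128 records */

/* Advance the target the way the patch bumps resync_nh.decrypted_tgt:
 * double until the cap, then grow linearly so further attempts happen
 * every RESYNC_NH_MAX_IVAL records.
 */
static unsigned int next_target(unsigned int tgt)
{
	if (tgt < RESYNC_NH_MAX_IVAL)
		return tgt * 2;
	return tgt + RESYNC_NH_MAX_IVAL;
}

int main(void)
{
	unsigned int tgt = RESYNC_NH_START_IVAL;
	int i;

	/* Targets: 2, 4, 8, 16, 32, 64, 128, 256, 384, ... */
	for (i = 0; i < 9; i++) {
		printf("retry once more than %u encrypted records have failed\n",
		       tgt);
		tgt = next_target(tgt);
	}
	return 0;
}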
diff --git a/include/net/tls.h b/include/net/tls.h
index 1c512da5e4f4..28eca6a3b615 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -303,10 +303,33 @@ struct tlsdev_ops {
struct sock *sk, u32 seq, u8 *rcd_sn);
};
+enum tls_offload_sync_type {
+ TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
+ TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
+};
+
+#define TLS_DEVICE_RESYNC_NH_START_IVAL 2
+#define TLS_DEVICE_RESYNC_NH_MAX_IVAL 128
+
struct tls_offload_context_rx {
/* sw must be the first member of tls_offload_context_rx */
struct tls_sw_context_rx sw;
- atomic64_t resync_req;
+ enum tls_offload_sync_type resync_type;
+ /* this member is set regardless of resync_type, to avoid branches */
+ u8 resync_nh_reset:1;
+ /* CORE_NEXT_HINT-only member, but use the hole here */
+ u8 resync_nh_do_now:1;
+ union {
+ /* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
+ struct {
+ atomic64_t resync_req;
+ };
+ /* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
+ struct {
+ u32 decrypted_failed;
+ u32 decrypted_tgt;
+ } resync_nh;
+ };
u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
@@ -587,6 +610,13 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
}
+static inline void
+tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ tls_offload_ctx_rx(tls_ctx)->resync_type = type;
+}
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
unsigned char *record_type);
@@ -608,6 +638,6 @@ int tls_sw_fallback_init(struct sock *sk,
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
-void tls_device_rx_resync_new_rec(struct sock *sk, u32 seq);
+void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
#endif /* _TLS_OFFLOAD_H */
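
A driver opts into the new stack-driven mode from its ->tls_dev_add()
callback. A hypothetical sketch follows; the foo_* name and the hardware
programming step are placeholders, and only tls_offload_rx_resync_set_type()
and the enum value come from this patch:

/* Hypothetical driver hook, assuming the tls_dev_add() signature from
 * struct tlsdev_ops.
 */
static int foo_tls_dev_add(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
		/* ... program the RX crypto state into the device ... */

		/* Let the stack schedule resyncs instead of scanning
		 * the TCP stream in hardware.
		 */
		tls_offload_rx_resync_set_type(sk,
				TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
	}
	return 0;
}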
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 0ecfa0ee415d..477c869c69c8 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -563,10 +563,12 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
}
-void tls_device_rx_resync_new_rec(struct sock *sk, u32 seq)
+void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx;
+ u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+ struct tls_prot_info *prot;
u32 is_req_pending;
s64 resync_req;
u32 req_seq;
@@ -574,15 +576,84 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 seq)
if (tls_ctx->rx_conf != TLS_HW)
return;
+ prot = &tls_ctx->prot_info;
rx_ctx = tls_offload_ctx_rx(tls_ctx);
- resync_req = atomic64_read(&rx_ctx->resync_req);
- req_seq = resync_req >> 32;
- seq += TLS_HEADER_SIZE - 1;
- is_req_pending = resync_req;
-
- if (unlikely(is_req_pending) && req_seq == seq &&
- atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
- tls_device_resync_rx(tls_ctx, sk, seq, tls_ctx->rx.rec_seq);
+ memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
+
+ switch (rx_ctx->resync_type) {
+ case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
+ resync_req = atomic64_read(&rx_ctx->resync_req);
+ req_seq = resync_req >> 32;
+ seq += TLS_HEADER_SIZE - 1;
+ is_req_pending = resync_req;
+
+ if (likely(!is_req_pending) || req_seq != seq ||
+ !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
+ return;
+ break;
+ case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
+ if (likely(!rx_ctx->resync_nh_do_now))
+ return;
+
+ /* head of next rec is already in, note that the sock_inq will
+ * include the currently parsed message when called from parser
+ */
+ if (tcp_inq(sk) > rcd_len)
+ return;
+
+ rx_ctx->resync_nh_do_now = 0;
+ seq += rcd_len;
+ tls_bigint_increment(rcd_sn, prot->rec_seq_size);
+ break;
+ }
+
+ tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
+}
+
+static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
+ struct tls_offload_context_rx *ctx,
+ struct sock *sk, struct sk_buff *skb)
+{
+ struct strp_msg *rxm;
+
+ /* device will request resyncs by itself based on stream scan */
+ if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
+ return;
+ /* already scheduled */
+ if (ctx->resync_nh_do_now)
+ return;
+ /* seen decrypted fragments since last fully-failed record */
+ if (ctx->resync_nh_reset) {
+ ctx->resync_nh_reset = 0;
+ ctx->resync_nh.decrypted_failed = 1;
+ ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
+ return;
+ }
+
+ if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
+ return;
+
+ /* doing resync, bump the next target in case it fails */
+ if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
+ ctx->resync_nh.decrypted_tgt *= 2;
+ else
+ ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;
+
+ rxm = strp_msg(skb);
+
+ /* head of next rec is already in, parser will sync for us */
+ if (tcp_inq(sk) > rxm->full_len) {
+ ctx->resync_nh_do_now = 1;
+ } else {
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+
+ memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
+ tls_bigint_increment(rcd_sn, prot->rec_seq_size);
+
+ tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
+ rcd_sn);
+ }
}
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
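
In the DRIVER_REQ branch above, the whole resync request fits in a single
atomic64: the suspected header's TCP sequence number in the upper 32 bits
and a pending flag in bit 0, which is why one atomic read doubles as the
pending test. A userspace-style sketch of the encoding (illustrative; it
mirrors tls_offload_rx_resync_request() and the decode above):

#include <stdbool.h>
#include <stdint.h>

/* Pack as tls_offload_rx_resync_request() does: sequence high, flag low. */
static uint64_t resync_req_pack(uint32_t tcp_seq)
{
	return ((uint64_t)tcp_seq << 32) | 1;
}

/* Decode as tls_device_rx_resync_new_rec() does; hdr_seq is the TCP
 * sequence of the last byte of the freshly parsed record header
 * (record start + TLS_HEADER_SIZE - 1).
 */
static bool resync_req_matches(uint64_t req, uint32_t hdr_seq)
{
	bool pending = req & 1;
	uint32_t req_seq = req >> 32;

	return pending && req_seq == hdr_seq;
}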
@@ -686,12 +757,21 @@ int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
ctx->sw.decrypted |= is_decrypted;
- /* Return immedeatly if the record is either entirely plaintext or
+ /* Return immediately if the record is either entirely plaintext or
* entirely ciphertext. Otherwise handle reencrypt partially decrypted
* record.
*/
- return (is_encrypted || is_decrypted) ? 0 :
- tls_device_reencrypt(sk, skb);
+ if (is_decrypted) {
+ ctx->resync_nh_reset = 1;
+ return 0;
+ }
+ if (is_encrypted) {
+ tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
+ return 0;
+ }
+
+ ctx->resync_nh_reset = 1;
+ return tls_device_reencrypt(sk, skb);
}
static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
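
The rewritten tail of tls_device_decrypted() above amounts to a three-way
classification per record; condensed into an illustrative sketch (not the
kernel function):

#include <stdbool.h>

enum rec_outcome {
	REC_DECRYPTED,	/* HW handled it: reset the resync back off */
	REC_ENCRYPTED,	/* HW missed the whole record: maybe resync */
	REC_MIXED,	/* partial: reset back off, then re-encrypt so
			 * SW crypto can process the full record
			 */
};

static enum rec_outcome classify_record(bool is_decrypted, bool is_encrypted)
{
	if (is_decrypted)
		return REC_DECRYPTED;
	if (is_encrypted)
		return REC_ENCRYPTED;
	return REC_MIXED;
}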
@@ -917,6 +997,7 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
rc = -ENOMEM;
goto release_netdev;
}
+ context->resync_nh_reset = 1;
ctx->priv_ctx_rx = context;
rc = tls_set_sw_offload(sk, ctx, 0);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index bc3a1b188d4a..533eaa4826e5 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2015,7 +2015,7 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
goto read_failure;
}
#ifdef CONFIG_TLS_DEVICE
- tls_device_rx_resync_new_rec(strp->sk,
+ tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
TCP_SKB_CB(skb)->seq + rxm->offset);
#endif
return data_len + TLS_HEADER_SIZE;
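
The strparser now passes the full on-the-wire record length, header
included, which is what lets the CORE_NEXT_HINT path compute where the
next record starts (this record's TCP sequence plus rcd_len). A sketch of
that length computation, assuming the standard 5-byte TLS record header
(illustrative; tls_read_size() derives data_len from the same field):

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

#define TLS_HEADER_SIZE 5	/* type(1) + version(2) + length(2) */

/* rcd_len as now passed to tls_device_rx_resync_new_rec(): the header
 * plus the payload length from the header's big-endian length field.
 */
static uint32_t tls_record_wire_len(const uint8_t hdr[TLS_HEADER_SIZE])
{
	uint16_t be_len;

	memcpy(&be_len, hdr + 3, sizeof(be_len));
	return TLS_HEADER_SIZE + ntohs(be_len);
}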