Diffstat (limited to 'net/tls/tls_device.c')
-rw-r--r--  net/tls/tls_device.c  89
1 file changed, 49 insertions(+), 40 deletions(-)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index cc0256939eb6..ca54a7c7ec81 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -89,22 +89,6 @@ static void tls_device_gc_task(struct work_struct *work)
 	}
 }
 
-static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
-			      struct net_device *netdev)
-{
-	if (sk->sk_destruct != tls_device_sk_destruct) {
-		refcount_set(&ctx->refcount, 1);
-		dev_hold(netdev);
-		ctx->netdev = netdev;
-		spin_lock_irq(&tls_device_lock);
-		list_add_tail(&ctx->list, &tls_device_list);
-		spin_unlock_irq(&tls_device_lock);
-
-		ctx->sk_destruct = sk->sk_destruct;
-		sk->sk_destruct = tls_device_sk_destruct;
-	}
-}
-
 static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
 {
 	unsigned long flags;
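
tls_device_attach() is not deleted here, only moved: the identical body reappears near the bottom of this diff, below tls_device_sk_destruct(). Once tls_device_sk_destruct() becomes static (next hunk) and loses its out-of-file declaration, it must be defined before tls_device_attach() can take its address; moving the caller below the definition avoids the alternative of a forward declaration, which would look roughly like:

	/* hypothetical alternative that the reordering avoids */
	static void tls_device_sk_destruct(struct sock *sk);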
@@ -199,7 +183,7 @@ static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
  * socket and no in-flight SKBs associated with this
  * socket, so it is safe to free all the resources.
  */
-void tls_device_sk_destruct(struct sock *sk)
+static void tls_device_sk_destruct(struct sock *sk)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
@@ -217,7 +201,6 @@ void tls_device_sk_destruct(struct sock *sk)
 	if (refcount_dec_and_test(&tls_ctx->refcount))
 		tls_device_queue_ctx_destruction(tls_ctx);
 }
-EXPORT_SYMBOL(tls_device_sk_destruct);
 
 void tls_device_free_resources_tx(struct sock *sk)
 {
@@ -558,14 +541,11 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
 
 void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
 {
-	int rc = 0;
-
 	if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) {
 		gfp_t sk_allocation = sk->sk_allocation;
 
 		sk->sk_allocation = GFP_ATOMIC;
-		rc = tls_push_partial_record(sk, ctx,
-					     MSG_DONTWAIT | MSG_NOSIGNAL);
+		tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL);
 		sk->sk_allocation = sk_allocation;
 	}
 }
@@ -584,7 +564,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 
 	rx_ctx = tls_offload_ctx_rx(tls_ctx);
 	resync_req = atomic64_read(&rx_ctx->resync_req);
-	req_seq = ntohl(resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
+	req_seq = (resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
 	is_req_pending = resync_req;
 
 	if (unlikely(is_req_pending) && req_seq == seq &&
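
Dropping ntohl() here reads as a byte-order fix: the upper 32 bits of resync_req are expected to hold the requested TCP sequence number already in host byte order (the conversion presumably now happens once, where the driver's resync request is stored), so swapping again on the consumer side would corrupt the value on little-endian machines. A minimal userspace model of the packing this line assumes, with made-up numbers:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TLS_HEADER_SIZE 5	/* TLS record header length */

	int main(void)
	{
		/* producer side: host-order seq in the high 32 bits,
		 * low bit set to mark the request pending
		 */
		uint64_t resync_req = ((uint64_t)4096 << 32) | 1;

		/* consumer side, matching the '+' line above: a plain
		 * shift, no ntohl(), then back up over the record header
		 */
		uint32_t req_seq = (uint32_t)(resync_req >> 32) -
				   ((uint32_t)TLS_HEADER_SIZE - 1);

		printf("pending=%d req_seq=%" PRIu32 "\n",
		       resync_req != 0, req_seq);
		return 0;
	}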
@@ -597,7 +577,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
 {
 	struct strp_msg *rxm = strp_msg(skb);
-	int err = 0, offset = rxm->offset, copy, nsg;
+	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
 	struct sk_buff *skb_iter, *unused;
 	struct scatterlist sg[1];
 	char *orig_buf, *buf;
@@ -628,25 +608,42 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
 	else
 		err = 0;
 
-	copy = min_t(int, skb_pagelen(skb) - offset,
-		     rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;
 
-	if (skb->decrypted)
-		skb_store_bits(skb, offset, buf, copy);
+	if (skb_pagelen(skb) > offset) {
+		copy = min_t(int, skb_pagelen(skb) - offset, data_len);
 
-	offset += copy;
-	buf += copy;
+		if (skb->decrypted)
+			skb_store_bits(skb, offset, buf, copy);
 
+		offset += copy;
+		buf += copy;
+	}
+
+	pos = skb_pagelen(skb);
 	skb_walk_frags(skb, skb_iter) {
-		copy = min_t(int, skb_iter->len,
-			     rxm->full_len - offset + rxm->offset -
-			     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+		int frag_pos;
+
+		/* Practically all frags must belong to msg if reencrypt
+		 * is needed with current strparser and coalescing logic,
+		 * but strparser may "get optimized", so let's be safe.
+		 */
+		if (pos + skb_iter->len <= offset)
+			goto done_with_frag;
+		if (pos >= data_len + rxm->offset)
+			break;
+
+		frag_pos = offset - pos;
+		copy = min_t(int, skb_iter->len - frag_pos,
+			     data_len + rxm->offset - offset);
 
 		if (skb_iter->decrypted)
-			skb_store_bits(skb_iter, offset, buf, copy);
+			skb_store_bits(skb_iter, frag_pos, buf, copy);
 
 		offset += copy;
 		buf += copy;
+done_with_frag:
+		pos += skb_iter->len;
 	}
 
 free_buf:
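
The rewritten walk keeps three cursors: pos is where the current fragment starts within the whole skb, offset is the next byte of the record still to be written back, and frag_pos is that byte's position inside the current fragment. The old code passed the record-relative offset straight to skb_store_bits() for every fragment, which is only correct when a fragment begins exactly where the remaining record data begins. A self-contained userspace walk-through of the new bounds arithmetic (all lengths invented):

	#include <stdio.h>

	#define MIN(a, b)	((a) < (b) ? (a) : (b))

	int main(void)
	{
		int pagelen = 50;			/* skb_pagelen(skb) */
		int frag_len[] = { 100, 60, 80 };	/* frag list lengths */
		int msg_offset = 20;			/* rxm->offset */
		int data_len = 150;			/* full_len minus GCM tag */
		int offset = msg_offset;		/* next record byte */
		int pos, i;

		if (pagelen > offset) {
			int copy = MIN(pagelen - offset, data_len);

			printf("head: %d bytes at %d\n", copy, offset);
			offset += copy;
		}

		pos = pagelen;		/* first frag starts here in the skb */
		for (i = 0; i < 3; i++) {
			int frag_pos, copy;

			if (pos + frag_len[i] <= offset)	/* before the data */
				goto done_with_frag;
			if (pos >= data_len + msg_offset)	/* past the data */
				break;

			frag_pos = offset - pos;
			copy = MIN(frag_len[i] - frag_pos,
				   data_len + msg_offset - offset);
			printf("frag %d: %d bytes at %d\n", i, copy, frag_pos);
			offset += copy;
	done_with_frag:
			pos += frag_len[i];
		}
		return 0;
	}

With these numbers the head contributes 30 bytes, the first fragment 100, the second 20, and the third is skipped entirely, totalling exactly data_len.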
@@ -682,6 +679,22 @@ int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
 		tls_device_reencrypt(sk, skb);
 }
 
+static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
+			      struct net_device *netdev)
+{
+	if (sk->sk_destruct != tls_device_sk_destruct) {
+		refcount_set(&ctx->refcount, 1);
+		dev_hold(netdev);
+		ctx->netdev = netdev;
+		spin_lock_irq(&tls_device_lock);
+		list_add_tail(&ctx->list, &tls_device_list);
+		spin_unlock_irq(&tls_device_lock);
+
+		ctx->sk_destruct = sk->sk_destruct;
+		sk->sk_destruct = tls_device_sk_destruct;
+	}
+}
+
 int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 {
 	u16 nonce_size, tag_size, iv_size, rec_seq_size;
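
The relocated tls_device_attach() shows the destructor-hijack pattern this file relies on: take a netdev reference, link the context onto the global device list, then save the socket's original sk_destruct and substitute the TLS one, which presumably chains back to the saved pointer during teardown (the comment on tls_device_sk_destruct() above describes that teardown). The guard on the first line also makes attach idempotent, so installing RX offload on a socket that already has TX offload does not take the references twice. A stripped-down userspace model of the pattern (all types and names invented):

	#include <stdio.h>

	struct sock;
	typedef void (*destruct_fn)(struct sock *sk);

	struct sock {
		destruct_fn sk_destruct;
	};

	static struct {
		destruct_fn sk_destruct;	/* saved original */
	} ctx;

	static void orig_destruct(struct sock *sk)
	{
		printf("protocol teardown\n");
	}

	static void tls_destruct(struct sock *sk)
	{
		printf("TLS device teardown\n");
		ctx.sk_destruct(sk);		/* chain to the original */
	}

	int main(void)
	{
		struct sock sk = { .sk_destruct = orig_destruct };
		int i;

		/* attach twice: the guard makes the second pass a no-op */
		for (i = 0; i < 2; i++) {
			if (sk.sk_destruct != tls_destruct) {
				ctx.sk_destruct = sk.sk_destruct;
				sk.sk_destruct = tls_destruct;
			}
		}

		sk.sk_destruct(&sk);		/* socket death */
		return 0;
	}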
@@ -865,8 +878,6 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
 	}
 
 	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
-		pr_err_ratelimited("%s: netdev %s with no TLS offload\n",
-				   __func__, netdev->name);
 		rc = -ENOTSUPP;
 		goto release_netdev;
 	}
@@ -894,11 +905,8 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
 	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
 					     &ctx->crypto_recv.info,
 					     tcp_sk(sk)->copied_seq);
-	if (rc) {
-		pr_err_ratelimited("%s: The netdev has refused to offload this socket\n",
-				   __func__);
+	if (rc)
 		goto free_sw_resources;
-	}
 
 	tls_device_attach(ctx, sk, netdev);
 	goto release_netdev;
@@ -1025,4 +1033,5 @@ void __exit tls_device_cleanup(void)
 {
 	unregister_netdevice_notifier(&tls_dev_notifier);
 	flush_work(&tls_device_gc_work);
+	clean_acked_data_flush();
 }
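
The new clean_acked_data_flush() call pairs module exit with the icsk clean-acked hook the TX path installs. Assuming clean_acked_data_enable()/clean_acked_data_disable() toggle a deferred static key on the TCP side (which is what the companion patch in this series appears to do), the disable path only schedules the jump-label update, so tls_device_cleanup() must flush that deferred work before the module's code can safely go away. A sketch of what the flush helper presumably wraps; only the function name below appears in this diff, the body is a guess at the design:

	#include <linux/jump_label_ratelimit.h>

	/* assumed TCP-side definitions */
	static DEFINE_STATIC_KEY_DEFERRED_FALSE(clean_acked_data_enabled, HZ);

	void clean_acked_data_flush(void)
	{
		/* wait out any deferred static_branch disable scheduled
		 * by clean_acked_data_disable(), so a module caller can
		 * unload safely
		 */
		static_key_deferred_flush(&clean_acked_data_enabled);
	}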