| author | | 2024-08-02 14:10:55 +0200 |
|---|---|---|
| committer | | 2024-08-02 14:10:55 +0200 |
| commit | 4436e6da008fee87d54c038e983e5be9a6baf8fb (patch) | |
| tree | 265a15efcf6f17e0e32e258d66b274fc5cad41d4 | /net/tls/tls_device.c |
| parent | x86/mm: Cleanup prctl_enable_tagged_addr() nr_bits error checking (diff) | |
| parent | Linux 6.11-rc1 (diff) | |
Merge branch 'linus' into x86/mm
Bring x86 and selftests up to date
Diffstat (limited to 'net/tls/tls_device.c')
| -rw-r--r-- | net/tls/tls_device.c | 11 |
1 file changed, 2 insertions(+), 9 deletions(-)
```diff
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index ab6e694f7bc2..dc063c2c7950 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -231,14 +231,10 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
 				 u32 seq)
 {
 	struct net_device *netdev;
-	struct sk_buff *skb;
 	int err = 0;
 	u8 *rcd_sn;
 
-	skb = tcp_write_queue_tail(sk);
-	if (skb)
-		TCP_SKB_CB(skb)->eor = 1;
-
+	tcp_write_collapse_fence(sk);
 	rcd_sn = tls_ctx->tx.rec_seq;
 
 	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
@@ -1067,7 +1063,6 @@ int tls_set_device_offload(struct sock *sk)
 	struct tls_prot_info *prot;
 	struct net_device *netdev;
 	struct tls_context *ctx;
-	struct sk_buff *skb;
 	char *iv, *rec_seq;
 	int rc;
 
@@ -1138,9 +1133,7 @@ int tls_set_device_offload(struct sock *sk)
 	 * SKBs where only part of the payload needs to be encrypted.
 	 * So mark the last skb in the write queue as end of record.
 	 */
-	skb = tcp_write_queue_tail(sk);
-	if (skb)
-		TCP_SKB_CB(skb)->eor = 1;
+	tcp_write_collapse_fence(sk);
 
 	/* Avoid offloading if the device is down
 	 * We don't want to offload new flows after
```
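Both hunks replace the same open-coded pattern, setting the end-of-record flag on the tail skb of the TCP write queue, with a single call to tcp_write_collapse_fence(). As a rough sketch, assuming the helper simply factors out the pattern removed above (its actual definition lives outside this diff and may carry additional fencing), it would look like this:

```c
/* Sketch only, reconstructed from the two call sites in this diff:
 * mark the last skb currently in the TCP write queue as end-of-record
 * so the stack does not collapse later writes into it. This is an
 * illustration, not the in-tree definition of the helper.
 */
static inline void tcp_write_collapse_fence(struct sock *sk)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (skb)
		TCP_SKB_CB(skb)->eor = 1;
}
```

Centralizing the pattern lets tls_device.c stop touching TCP_SKB_CB() directly and drops the local struct sk_buff *skb variables at both call sites, which is what the diffstat (2 insertions, 9 deletions) reflects.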
