author     Paolo Abeni <pabeni@redhat.com>          2025-02-18 19:36:13 +0100
committer  Jakub Kicinski <kuba@kernel.org>         2025-02-19 19:05:28 -0800
commit     f03afb3aeb9d81f6c5ab728a61a040012923e3b3 (patch)
tree       7efbccc5e354f88ec8c40306ce3474d324ddc67a /net/mptcp/fastopen.c
parent     mptcp: consolidate subflow cleanup (diff)
mptcp: drop __mptcp_fastopen_gen_msk_ackseq()
When we will move the whole RX path under the msk socket lock, updating the already queued skbs for a passive fastopen socket at 3rd-ack time will be extremely painful and race prone.

The map_seq for already enqueued skbs is used only to allow correct coalescing with later data; by preventing collapsing into the first skb of a fastopen connection, we can completely remove the __mptcp_fastopen_gen_msk_ackseq() helper.

Before dropping this helper, a new item had to be added to the mptcp_skb_cb structure. Because this item will be frequently tested in the fast path -- almost on every packet -- and because there is free space there, a single byte is used instead of a bitfield. This micro-optimisation slightly reduces the number of CPU operations needed for the associated check.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Link: https://patch.msgid.link/20250218-net-next-mptcp-rx-path-refactor-v1-2-4a47d90d7998@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
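A note on the mptcp_skb_cb change mentioned above: it lands outside this file (the structure lives in net/mptcp/protocol.h, which is not part of the diff below, since the diffstat is limited to fastopen.c). A minimal sketch of how the structure plausibly looks after this patch, with the new one-byte flag next to the existing has_rxtstamp byte; the exact field layout is an assumption:

	/* sketch, not the verbatim tree content */
	struct mptcp_skb_cb {
		u64 map_seq;
		u64 end_seq;
		u32 offset;
		u8  has_rxtstamp;
		u8  cant_coalesce;	/* new: skb must never be merged with later data */
	};

Keeping the flag as a plain byte rather than a bitfield means the fast-path test is a single load and compare, which is the micro-optimisation the commit message refers to.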
Diffstat (limited to 'net/mptcp/fastopen.c')
-rw-r--r--	net/mptcp/fastopen.c	| 24 ++----------------------
1 file changed, 2 insertions(+), 22 deletions(-)
diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
index a29ff901df75..7777f5a2d143 100644
--- a/net/mptcp/fastopen.c
+++ b/net/mptcp/fastopen.c
@@ -40,13 +40,12 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf
tp->copied_seq += skb->len;
subflow->ssn_offset += skb->len;
- /* initialize a dummy sequence number, we will update it at MPC
- * completion, if needed
- */
+ /* Only the sequence delta is relevant */
MPTCP_SKB_CB(skb)->map_seq = -skb->len;
MPTCP_SKB_CB(skb)->end_seq = 0;
MPTCP_SKB_CB(skb)->offset = 0;
MPTCP_SKB_CB(skb)->has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
+ MPTCP_SKB_CB(skb)->cant_coalesce = 1;
mptcp_data_lock(sk);
@@ -58,22 +57,3 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf
mptcp_data_unlock(sk);
}
-
-void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
- const struct mptcp_options_received *mp_opt)
-{
- struct sock *sk = (struct sock *)msk;
- struct sk_buff *skb;
-
- skb = skb_peek_tail(&sk->sk_receive_queue);
- if (skb) {
- WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq);
- pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx\n", sk,
- MPTCP_SKB_CB(skb)->map_seq, MPTCP_SKB_CB(skb)->map_seq + msk->ack_seq,
- MPTCP_SKB_CB(skb)->end_seq, MPTCP_SKB_CB(skb)->end_seq + msk->ack_seq);
- MPTCP_SKB_CB(skb)->map_seq += msk->ack_seq;
- MPTCP_SKB_CB(skb)->end_seq += msk->ack_seq;
- }
-
- pr_debug("msk=%p ack_seq=%llx\n", msk, msk->ack_seq);
-}
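On the consumer side, the new flag presumably guards the msk-level coalescing helper, so that an skb queued at SYN/ACK time, which now carries only a dummy map_seq (-skb->len), is never merged with later data whose map_seq is MPC-adjusted. A hedged sketch of such a guard; mptcp_try_coalesce() exists in net/mptcp/protocol.c, but that file is outside this diff, so the exact shape and placement of the test are assumptions:

	static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
				       struct sk_buff *from)
	{
		bool fragstolen;
		int delta;

		/* assumed guard: a fastopen skb carries a dummy map_seq,
		 * so refuse to collapse later data into it
		 */
		if (unlikely(MPTCP_SKB_CB(from)->cant_coalesce) ||
		    MPTCP_SKB_CB(from)->offset ||
		    ((to->len + from->len) > (sk->sk_rcvbuf >> 3)) ||
		    !skb_try_coalesce(to, from, &fragstolen, &delta))
			return false;

		/* ... merge bookkeeping elided ... */
		return true;
	}

With coalescing refused for that first skb, the dummy map_seq never needs to be rewritten at 3rd-ack time, which is exactly why __mptcp_fastopen_gen_msk_ackseq() can be dropped.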