author    Yuchung Cheng <ycheng@google.com>  2018-05-16 16:40:12 -0700
committer David S. Miller <davem@davemloft.net>  2018-05-17 15:41:28 -0400
commit    6ac06ecd3a5d1dd1aaea5c2a8f6d6e4c81d5de6a (patch)
tree      66759ad3e1deda045d5656bd794ac30e54611ca6  /net/ipv4/tcp_recovery.c
parent    tcp: disable RFC6675 loss detection (diff)
tcp: simpler NewReno implementation
This is a rewrite of the NewReno loss recovery implementation that is
simpler and standalone, for readability and better performance by using
fewer states.

Note that NewReno refers to RFC6582 as a modification to the fast
recovery algorithm. In Linux it is used only if the connection does not
support SACK. It should not be confused with the Reno (AIMD) congestion
control.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Priyaranjan Jha <priyarjha@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
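[Editor's note] The patch below triggers on exactly two events: enough DUPACKs to enter fast recovery, or an ACK that advances snd_una while recovery is already in progress. A minimal userspace sketch of just that trigger logic follows; all names here (conn, dupack_count, newreno_should_mark_lost) are illustrative, not kernel identifiers, and dupack_count stands in for tp->sacked_out, which counts DUPACKs on non-SACK flows.

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified model of the two inputs the patch consults:
	 * the congestion-avoidance state (are we already in fast
	 * recovery?) and the DUPACK count.
	 */
	enum ca_state { CA_OPEN, CA_RECOVERY };

	struct conn {
		enum ca_state state;
		int dupack_count;   /* plays the role of tp->sacked_out */
		int reordering;     /* DUPACK threshold, normally 3 */
	};

	/* True when the head of the retransmit queue should be marked
	 * lost: either enough DUPACKs arrived to enter fast recovery,
	 * or an ACK advanced snd_una during an ongoing recovery.
	 */
	static bool newreno_should_mark_lost(const struct conn *c,
					     bool snd_una_advanced)
	{
		if (c->state < CA_RECOVERY &&
		    c->dupack_count >= c->reordering)
			return true;
		if (c->state == CA_RECOVERY && snd_una_advanced)
			return true;
		return false;
	}

	int main(void)
	{
		struct conn c = { CA_OPEN, 3, 3 };

		/* Third DUPACK outside recovery: mark head lost. */
		printf("enter recovery: %d\n",
		       newreno_should_mark_lost(&c, false));

		/* Partial ACK during recovery: mark next unacked lost. */
		c.state = CA_RECOVERY;
		printf("partial ack:    %d\n",
		       newreno_should_mark_lost(&c, true));
		return 0;
	}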
Diffstat (limited to 'net/ipv4/tcp_recovery.c')
-rw-r--r--  net/ipv4/tcp_recovery.c  27
1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index 1c1bdf12a96f..299b0e38aa9a 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -216,3 +216,30 @@ void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
 		tp->rack.reo_wnd_steps = 1;
 	}
 }
+
+/* RFC6582 NewReno recovery for non-SACK connection. It simply retransmits
+ * the next unacked packet upon receiving
+ * a) three or more DUPACKs to start the fast recovery
+ * b) an ACK acknowledging new data during the fast recovery.
+ */
+void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
+{
+	const u8 state = inet_csk(sk)->icsk_ca_state;
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
+	    (state == TCP_CA_Recovery && snd_una_advanced)) {
+		struct sk_buff *skb = tcp_rtx_queue_head(sk);
+		u32 mss;
+
+		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
+			return;
+
+		mss = tcp_skb_mss(skb);
+		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
+			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
+				     mss, mss, GFP_ATOMIC);
+
+		tcp_skb_mark_lost_uncond_verify(tp, skb);
+	}
+}
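
[Editor's note] For context, here is a sketch of how an ACK-path caller might select between this helper and RACK, loosely modeled on the companion tcp_input.c change in this series. The function name identify_packet_loss_sketch is hypothetical and the flag plumbing is an assumption, not part of this diff; tcp_rtx_queue_empty(), tcp_is_reno(), tcp_rack_mark_lost(), and FLAG_SND_UNA_ADVANCED are existing tcp_input.c/tcp_recovery.c symbols.

	/* Hypothetical caller (not part of this diff): on each ACK, pick
	 * the loss detection method. Non-SACK (Reno) flows use the new
	 * NewReno helper; SACK flows keep using RACK. FLAG_SND_UNA_ADVANCED
	 * means the ACK moved snd_una forward.
	 */
	static void identify_packet_loss_sketch(struct sock *sk, int ack_flag)
	{
		struct tcp_sock *tp = tcp_sk(sk);

		if (tcp_rtx_queue_empty(sk))
			return;

		if (tcp_is_reno(tp))	/* no SACK negotiated */
			tcp_newreno_mark_lost(sk,
					      ack_flag & FLAG_SND_UNA_ADVANCED);
		else
			tcp_rack_mark_lost(sk);	/* SACK-based RACK */
	}

Note the tcp_fragment() step in the patch: without SACK, NewReno can infer at most one loss per round trip, so splitting a multi-segment head skb down to one MSS before marking it lost avoids retransmitting more data than the algorithm can justify.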