path: root/net/ipv4/tcp_output.c
author    Eric Dumazet <edumazet@google.com>    2020-05-04 11:27:50 -0700
committer David S. Miller <davem@davemloft.net> 2020-05-06 17:29:38 -0700
commit    916e6d1a5ef17a6b3bffad0f086f173cde4240d8 (patch)
tree      098d0ead6696790e52d5789abe5609dfbbfce83c /net/ipv4/tcp_output.c
parent    tcp: refine tcp_pacing_delay() for very low pacing rates (diff)
tcp: defer xmit timer reset in tcp_xmit_retransmit_queue()
As hinted in the prior change ("tcp: refine tcp_pacing_delay() for very low pacing rates"), it is probably best to arm the xmit timer only once all the packets have been scheduled, rather than when the head of the rtx queue has been re-sent.

This matters for flows with extremely low pacing rates, since their tp->tcp_wstamp_ns could be far in the future.

Note that the regular xmit path has a stronger limit in tcp_small_queue_check(), meaning it is less likely to go beyond the pacing horizon.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
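To make the control-flow change easier to follow outside the kernel tree, the sketch below shows the same deferral pattern as a standalone userspace C program: the loop records that the timer needs re-arming and breaks out of the early-exit paths, and the single re-arm happens only after the loop has finished. All names here (fake_skb, resend_one, rearm_timer_once, retransmit_queue) are hypothetical stand-ins for illustration, not kernel APIs.

/* Minimal sketch of the deferred re-arm pattern (illustration only). */
#include <stdbool.h>
#include <stdio.h>

struct fake_skb {
        int id;
        bool is_head;           /* stand-in for "skb == rtx_head" */
        bool send_fails;        /* stand-in for a failed retransmit */
};

/* Pretend (re)transmit: returns false on the early-exit paths that the
 * patch turns from "return" into "break".
 */
static bool resend_one(const struct fake_skb *skb)
{
        if (skb->send_fails)
                return false;
        printf("retransmitted skb %d\n", skb->id);
        return true;
}

/* Stand-in for tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, ...). */
static void rearm_timer_once(void)
{
        printf("xmit timer re-armed once, after the loop\n");
}

static void retransmit_queue(const struct fake_skb *q, int n)
{
        bool rearm_timer = false;

        for (int i = 0; i < n; i++) {
                if (!resend_one(&q[i]))
                        break;                  /* was an early "return" */
                if (q[i].is_head)
                        rearm_timer = true;     /* remember, act later */
        }
        if (rearm_timer)                        /* single re-arm after loop */
                rearm_timer_once();
}

int main(void)
{
        struct fake_skb q[] = {
                { .id = 1, .is_head = true },
                { .id = 2 },
                { .id = 3, .send_fails = true },
                { .id = 4 },
        };

        retransmit_queue(q, 4);
        return 0;
}

Built with any C99 compiler, this prints one "retransmitted" line per successful send and a single "re-armed" line at the end, mirroring how the patch moves the tcp_reset_xmit_timer() call out of the retransmit loop.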
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c  16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 32c9db902f18..a50e1990a845 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3112,6 +3112,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
         const struct inet_connection_sock *icsk = inet_csk(sk);
         struct sk_buff *skb, *rtx_head, *hole = NULL;
         struct tcp_sock *tp = tcp_sk(sk);
+        bool rearm_timer = false;
         u32 max_segs;
         int mib_idx;
 
@@ -3134,7 +3135,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 
                 segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
                 if (segs <= 0)
-                        return;
+                        break;
                 sacked = TCP_SKB_CB(skb)->sacked;
                 /* In case tcp_shift_skb_data() have aggregated large skbs,
                  * we need to make sure not sending too bigs TSO packets
@@ -3159,10 +3160,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                         continue;
 
                 if (tcp_small_queue_check(sk, skb, 1))
-                        return;
+                        break;
 
                 if (tcp_retransmit_skb(sk, skb, segs))
-                        return;
+                        break;
 
                 NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
 
@@ -3171,10 +3172,13 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 
                 if (skb == rtx_head &&
                     icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
-                        tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-                                             inet_csk(sk)->icsk_rto,
-                                             TCP_RTO_MAX);
+                        rearm_timer = true;
+
         }
+        if (rearm_timer)
+                tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+                                     inet_csk(sk)->icsk_rto,
+                                     TCP_RTO_MAX);
 }
 
 /* We allow to exceed memory limits for FIN packets to expedite