author     Eric Dumazet <edumazet@google.com>        2020-05-04 11:27:49 -0700
committer  David S. Miller <davem@davemloft.net>     2020-05-06 17:29:38 -0700
commit     8dc242ad661c2694a582541c2264ffc0e7c4d27d (patch)
tree       c709098fe394ed7fbae16c836d90ee3f15c1d10e
parent     arm64: dts: sdm845: add IPA iommus property (diff)
tcp: refine tcp_pacing_delay() for very low pacing rates
With the addition of the horizon feature to sch_fq, we noticed some suboptimal behavior of extremely low pacing rate TCP flows, especially when TCP is not aware of a drop happening in lower stacks.

Back in commit 3f80e08f40cd ("tcp: add tcp_reset_xmit_timer() helper"), tcp_pacing_delay() was added to estimate an extra delay to add to standard RTO timers.

This patch removes the skb argument from this helper and from tcp_reset_xmit_timer(), because it makes more sense to simply consider the time at which the next packet is allowed to be sent, instead of the time of whatever packet has already been sent.

This avoids arming the RTO timer too soon and removes spurious horizon drops.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
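For illustration only, the following is a small userspace sketch, not kernel code: the timestamps, the assumed HZ of 1000, and the helper names are made up for this example. It contrasts a delay derived from the flow's next allowed send time (the tcp_wstamp_ns analogue used by the new helper) with one derived from an skb that has already been released, which is effectively what the old skb-based computation could see at very low pacing rates.

/*
 * Userspace sketch, not kernel code: made-up timestamps and an assumed
 * HZ of 1000, showing why the extra RTO delay should be derived from
 * the flow's next allowed send time rather than from an skb that has
 * already been released.
 */
#include <stdio.h>
#include <stdint.h>

#define HZ 1000                         /* assumed jiffies frequency */
#define NSEC_PER_SEC 1000000000LL

/* crude stand-in for the kernel's nsecs_to_jiffies() */
static unsigned long nsecs_to_jiffies(int64_t ns)
{
        return (unsigned long)(ns / (NSEC_PER_SEC / HZ));
}

/* shape of the new helper: delay until this flow may send again */
static unsigned long pacing_delay(int64_t next_send_ns, int64_t clock_ns)
{
        int64_t delay = next_send_ns - clock_ns;

        return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}

int main(void)
{
        /* hypothetical flow paced at ~1 kB/s: one 1500 byte packet every 1.5 s */
        int64_t clock_ns     = 10 * NSEC_PER_SEC;           /* tcp_clock_cache analogue */
        int64_t skb_sent_ns  = 10 * NSEC_PER_SEC;           /* skb already released */
        int64_t next_send_ns = 10 * NSEC_PER_SEC + 3 * NSEC_PER_SEC / 2; /* tcp_wstamp_ns analogue */

        printf("delay from sent skb:    %lu jiffies\n",
               pacing_delay(skb_sent_ns, clock_ns));        /* 0: RTO armed too soon */
        printf("delay from next packet: %lu jiffies\n",
               pacing_delay(next_send_ns, clock_ns));       /* 1500: RTO pushed past next send */
        return 0;
}

As the commit message notes, basing the extra delay on an already-sent skb can arm the RTO too early; at very low pacing rates the resulting retransmit may then be scheduled beyond sch_fq's horizon and dropped.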
-rw-r--r--   include/net/tcp.h        21
-rw-r--r--   net/ipv4/tcp_input.c      4
-rw-r--r--   net/ipv4/tcp_output.c     8
3 files changed, 13 insertions(+), 20 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 1beed50522b1..43b87a8d4790 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1289,26 +1289,22 @@ static inline bool tcp_needs_internal_pacing(const struct sock *sk)
return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}
-/* Return in jiffies the delay before one skb is sent.
- * If @skb is NULL, we look at EDT for next packet being sent on the socket.
+/* Estimates in how many jiffies next packet for this flow can be sent.
+ * Scheduling a retransmit timer too early would be silly.
*/
-static inline unsigned long tcp_pacing_delay(const struct sock *sk,
- const struct sk_buff *skb)
+static inline unsigned long tcp_pacing_delay(const struct sock *sk)
{
- s64 pacing_delay = skb ? skb->tstamp : tcp_sk(sk)->tcp_wstamp_ns;
+ s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
- pacing_delay -= tcp_sk(sk)->tcp_clock_cache;
-
- return pacing_delay > 0 ? nsecs_to_jiffies(pacing_delay) : 0;
+ return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}
static inline void tcp_reset_xmit_timer(struct sock *sk,
const int what,
unsigned long when,
- const unsigned long max_when,
- const struct sk_buff *skb)
+ const unsigned long max_when)
{
- inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk, skb),
+ inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
max_when);
}
@@ -1336,8 +1332,7 @@ static inline void tcp_check_probe_timer(struct sock *sk)
{
if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- tcp_probe0_base(sk), TCP_RTO_MAX,
- NULL);
+ tcp_probe0_base(sk), TCP_RTO_MAX);
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d68128a672ab..7d205b2a733c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3014,7 +3014,7 @@ void tcp_rearm_rto(struct sock *sk)
rto = usecs_to_jiffies(max_t(int, delta_us, 1));
}
tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
- TCP_RTO_MAX, tcp_rtx_queue_head(sk));
+ TCP_RTO_MAX);
}
}
@@ -3291,7 +3291,7 @@ static void tcp_ack_probe(struct sock *sk)
unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- when, TCP_RTO_MAX, NULL);
+ when, TCP_RTO_MAX);
}
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index c414aeb1efa9..32c9db902f18 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2593,8 +2593,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
if (rto_delta_us > 0)
timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
- tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
- TCP_RTO_MAX, NULL);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
return true;
}
@@ -3174,8 +3173,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto,
- TCP_RTO_MAX,
- skb);
+ TCP_RTO_MAX);
}
}
@@ -3907,7 +3905,7 @@ void tcp_send_probe0(struct sock *sk)
*/
timeout = TCP_RESOURCE_PROBE_INTERVAL;
}
- tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
}
int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)