author		David S. Miller <davem@davemloft.net>	2020-05-06 17:29:38 -0700
committer	David S. Miller <davem@davemloft.net>	2020-05-06 17:29:38 -0700
commit		ee733cd8963903c494cbe1b43894eaa152932324 (patch)
tree		098d0ead6696790e52d5789abe5609dfbbfce83c
parent		arm64: dts: sdm845: add IPA iommus property (diff)
parent		tcp: defer xmit timer reset in tcp_xmit_retransmit_queue() (diff)
download	wireguard-linux-ee733cd8963903c494cbe1b43894eaa152932324.tar.xz
		wireguard-linux-ee733cd8963903c494cbe1b43894eaa152932324.zip
Merge branch 'tcp-minor-adjustments-for-low-pacing-rates'
Eric Dumazet says:

====================
tcp: minor adjustments for low pacing rates

After pacing horizon addition, we have to adjust how we arm rto timer,
otherwise we might freeze very low pacing rate flows.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
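For scale (an illustrative back-of-the-envelope, not taken from the patches themselves): at a pacing rate of 1 kbit/s, a single 1500-byte skb advances the flow's earliest departure time (EDT) by 12 seconds, so a retransmit timer keyed to one skb's EDT can land far in the future. A minimal userspace sketch of that arithmetic:

/*
 * Userspace sketch, not kernel code; the packet size and pacing rate
 * are illustrative assumptions.
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Pacing interval one packet adds to the flow's departure clock. */
static uint64_t pacing_interval_ns(uint64_t len_bytes, uint64_t rate_bytes_per_sec)
{
	return len_bytes * NSEC_PER_SEC / rate_bytes_per_sec;
}

int main(void)
{
	/* 1500-byte packet at 1 kbit/s == 125 bytes/s pacing rate. */
	uint64_t ns = pacing_interval_ns(1500, 125);

	printf("one skb advances EDT by %.1f seconds\n", ns / 1e9);
	return 0;
}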
-rw-r--r--	include/net/tcp.h	21
-rw-r--r--	net/ipv4/tcp_input.c	4
-rw-r--r--	net/ipv4/tcp_output.c	22
3 files changed, 22 insertions, 25 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 1beed50522b1..43b87a8d4790 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1289,26 +1289,22 @@ static inline bool tcp_needs_internal_pacing(const struct sock *sk)
 	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
 }
 
-/* Return in jiffies the delay before one skb is sent.
- * If @skb is NULL, we look at EDT for next packet being sent on the socket.
+/* Estimates in how many jiffies next packet for this flow can be sent.
+ * Scheduling a retransmit timer too early would be silly.
  */
-static inline unsigned long tcp_pacing_delay(const struct sock *sk,
-					     const struct sk_buff *skb)
+static inline unsigned long tcp_pacing_delay(const struct sock *sk)
 {
-	s64 pacing_delay = skb ? skb->tstamp : tcp_sk(sk)->tcp_wstamp_ns;
+	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
 
-	pacing_delay -= tcp_sk(sk)->tcp_clock_cache;
-
-	return pacing_delay > 0 ? nsecs_to_jiffies(pacing_delay) : 0;
+	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
 }
 
 static inline void tcp_reset_xmit_timer(struct sock *sk,
 					const int what,
 					unsigned long when,
-					const unsigned long max_when,
-					const struct sk_buff *skb)
+					const unsigned long max_when)
 {
-	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk, skb),
+	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
 				  max_when);
 }
@@ -1336,8 +1332,7 @@ static inline void tcp_check_probe_timer(struct sock *sk)
 {
 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
 		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-				     tcp_probe0_base(sk), TCP_RTO_MAX,
-				     NULL);
+				     tcp_probe0_base(sk), TCP_RTO_MAX);
 }
 
 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
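A standalone model of the reworked tcp_pacing_delay() above, for readers without a kernel tree at hand: the extra timer delay is now derived only from the flow-level tcp_wstamp_ns surplus over tcp_clock_cache, clamped at zero, never from an individual skb's timestamp. HZ=250 and the division-based jiffies conversion are simplifying assumptions; the kernel uses nsecs_to_jiffies().

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_JIFFY 4000000LL	/* HZ = 250, illustrative */

/* Mirrors tcp_wstamp_ns - tcp_clock_cache, clamped at zero. */
static unsigned long pacing_delay_jiffies(int64_t wstamp_ns, int64_t clock_cache_ns)
{
	int64_t delay = wstamp_ns - clock_cache_ns;

	return delay > 0 ? (unsigned long)(delay / NSEC_PER_JIFFY) : 0;
}

int main(void)
{
	/* Flow may transmit 2 s from now: timer deferred by ~500 jiffies. */
	printf("%lu\n", pacing_delay_jiffies(3000000000LL, 1000000000LL));
	/* Flow may transmit immediately: no deferral. */
	printf("%lu\n", pacing_delay_jiffies(1000000000LL, 1000000000LL));
	return 0;
}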
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d68128a672ab..7d205b2a733c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3014,7 +3014,7 @@ void tcp_rearm_rto(struct sock *sk)
 			rto = usecs_to_jiffies(max_t(int, delta_us, 1));
 		}
 		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
-				     TCP_RTO_MAX, tcp_rtx_queue_head(sk));
+				     TCP_RTO_MAX);
 	}
 }
@@ -3291,7 +3291,7 @@ static void tcp_ack_probe(struct sock *sk)
 		unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
 
 		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-				     when, TCP_RTO_MAX, NULL);
+				     when, TCP_RTO_MAX);
 	}
 }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index c414aeb1efa9..a50e1990a845 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2593,8 +2593,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
 	if (rto_delta_us > 0)
 		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
 
-	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
-			     TCP_RTO_MAX, NULL);
+	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
 	return true;
 }
@@ -3113,6 +3112,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sk_buff *skb, *rtx_head, *hole = NULL;
 	struct tcp_sock *tp = tcp_sk(sk);
+	bool rearm_timer = false;
 	u32 max_segs;
 	int mib_idx;
@@ -3135,7 +3135,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
 		if (segs <= 0)
-			return;
+			break;
 		sacked = TCP_SKB_CB(skb)->sacked;
 		/* In case tcp_shift_skb_data() have aggregated large skbs,
 		 * we need to make sure not sending too bigs TSO packets
@@ -3160,10 +3160,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
continue;
if (tcp_small_queue_check(sk, skb, 1))
- return;
+ break;
if (tcp_retransmit_skb(sk, skb, segs))
- return;
+ break;
NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
@@ -3172,11 +3172,13 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		if (skb == rtx_head &&
 		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
-			tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-					     inet_csk(sk)->icsk_rto,
-					     TCP_RTO_MAX,
-					     skb);
+			rearm_timer = true;
+
 	}
+	if (rearm_timer)
+		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+				     inet_csk(sk)->icsk_rto,
+				     TCP_RTO_MAX);
 }
 
 /* We allow to exceed memory limits for FIN packets to expedite
@@ -3907,7 +3909,7 @@ void tcp_send_probe0(struct sock *sk)
 		 */
 		timeout = TCP_RESOURCE_PROBE_INTERVAL;
 	}
-	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL);
+	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
 }
 
 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
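The tcp_output.c hunks share one pattern: each early return in the retransmit loop becomes a break, a flag records that the queue head was retransmitted, and the RTO timer is armed exactly once after the loop, when every retransmit has been scheduled and the pacing delay folded in by tcp_reset_xmit_timer() sees the flow's final tcp_wstamp_ns. A hedged standalone sketch of that control flow (helper and type names invented for illustration, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

struct pkt { int id; bool is_head; };

/* Stand-in for tcp_retransmit_skb(): pretend packet 2 hits an error. */
static bool send_one(const struct pkt *p)
{
	return p->id != 2;
}

static void retransmit_queue(const struct pkt *q, int n)
{
	bool rearm_timer = false;

	for (int i = 0; i < n; i++) {
		if (!send_one(&q[i]))
			break;			/* was "return" before the patch */
		if (q[i].is_head)
			rearm_timer = true;	/* was an immediate timer reset */
	}
	if (rearm_timer)			/* single rearm, after the loop */
		printf("rearm RTO timer\n");
}

int main(void)
{
	const struct pkt q[] = { {0, true}, {1, false}, {2, false}, {3, false} };

	retransmit_queue(q, 4);
	return 0;
}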