author     David S. Miller <davem@davemloft.net>  2018-11-11 13:54:54 -0800
committer  David S. Miller <davem@davemloft.net>  2018-11-11 13:54:54 -0800
commit     83afb36a7097bdf6776e261fa3a2bf2dc5a4a8c2
tree       1ba9698d64af24b2567bcd2d6cc590fb140ec386
parent     tcp: minor optimization in tcp ack fast path processing
parent     tcp: get rid of tcp_tso_should_defer() dependency on HZ/jiffies
Merge branch 'tcp-tso-defer-improvements'
Eric Dumazet says:

====================
tcp: tso defer improvements

This series makes tcp_tso_should_defer() a bit smarter:

1) MSG_EOR gives TCP a hint not to defer some skbs.

2) The second patch takes into account that the head tstamp can be in
   the future.

3) The third patch uses existing high-resolution state variables for a
   more precise heuristic.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
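To make the combined effect of the three patches easier to follow, the
snippet below is a minimal standalone C sketch of the refined heuristic.
It is not kernel code: struct defer_state, tso_defer_example() and the
field names are hypothetical stand-ins for the tcp_sock fields touched by
the diff below, and the checks are grouped by patch rather than in the
exact order the kernel function performs them.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000LL
#define NSEC_PER_USEC 1000LL

/* Hypothetical stand-ins for the tcp_sock fields used by the patches. */
struct defer_state {
	uint64_t clock_cache_ns; /* cached "now", like tp->tcp_clock_cache */
	uint64_t wstamp_ns;      /* write stamp; may be in the future under EDT pacing */
	uint64_t head_tstamp_ns; /* tstamp of the oldest skb in the rtx queue */
	uint32_t srtt_us;        /* smoothed RTT in usec, stored <<3, like tp->srtt_us */
	bool     eor;            /* skb carries MSG_EOR: it will not grow */
};

/* Return true when deferring looks advisable, false to send now. */
static bool tso_defer_example(const struct defer_state *s)
{
	int64_t delta;

	/* Patch 1: an skb marked with MSG_EOR will never be appended to,
	 * so waiting for more data to coalesce is pointless.
	 */
	if (s->eor)
		return false;

	/* Patch 2: only defer if the last write was recent (within 1 ms).
	 * wstamp_ns can sit in the future when packets wait in a qdisc or
	 * device for EDT delivery; the signed delta handles that case.
	 */
	delta = (int64_t)(s->clock_cache_ns - s->wstamp_ns) - NSEC_PER_MSEC;
	if (delta > 0)
		return false;

	/* Patch 3: nanosecond arithmetic instead of jiffies. If the head
	 * skb was sent less than srtt/2 ago, its ACK is still more than
	 * half an RTT away, too late to be worth waiting for.
	 */
	delta = (int64_t)(s->clock_cache_ns - s->head_tstamp_ns);
	if (delta - (int64_t)(NSEC_PER_USEC * (s->srtt_us >> 4)) < 0)
		return false;

	return true;
}

int main(void)
{
	struct defer_state s = {
		.clock_cache_ns = 10000000000ULL, /* "now" = 10 s */
		.wstamp_ns      = 10000000000ULL, /* wrote just now */
		.head_tstamp_ns =  9996000000ULL, /* head sent 4 ms ago */
		.srtt_us        = 5000u << 3,     /* 5 ms smoothed RTT */
		.eor            = false,
	};

	printf("%s\n", tso_defer_example(&s) ? "defer" : "send now");
	return 0;
}

In the actual function the 1 ms write-recency test runs before the EOR
test; the grouping above is purely for readability.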
-rw-r--r--  net/ipv4/tcp_output.c | 18 +++++++++++++-----
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9c34b97d365d..75dcf4daca72 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1907,10 +1907,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
bool *is_cwnd_limited, u32 max_segs)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
- u32 age, send_win, cong_win, limit, in_flight;
+ u32 send_win, cong_win, limit, in_flight;
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *head;
int win_divisor;
+ s64 delta;
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto send_now;
@@ -1919,9 +1920,12 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
goto send_now;
/* Avoid bursty behavior by allowing defer
- * only if the last write was recent.
+ * only if the last write was recent (1 ms).
+ * Note that tp->tcp_wstamp_ns can be in the future if we have
+ * packets waiting in a qdisc or device for EDT delivery.
*/
- if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0)
+ delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
+ if (delta > 0)
goto send_now;
in_flight = tcp_packets_in_flight(tp);
@@ -1944,6 +1948,10 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
goto send_now;
+ /* If this packet won't get more data, do not wait. */
+ if (TCP_SKB_CB(skb)->eor)
+ goto send_now;
+
win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
if (win_divisor) {
u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
@@ -1968,9 +1976,9 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
head = tcp_rtx_queue_head(sk);
if (!head)
goto send_now;
- age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
+ delta = tp->tcp_clock_cache - head->tstamp;
/* If next ACK is likely to come too late (half srtt), do not defer */
- if (age < (tp->srtt_us >> 4))
+ if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
goto send_now;
/* Ok, it looks like it is advisable to defer. */
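As a worked example of the unit handling in that last hunk (with
hypothetical numbers): tp->srtt_us stores the smoothed RTT in
microseconds left-shifted by 3, so srtt_us >> 4 yields half the RTT in
microseconds, and multiplying by NSEC_PER_USEC brings it to the
nanosecond scale of tcp_clock_cache.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000LL

int main(void)
{
	uint32_t srtt_us = 5000u << 3;    /* smoothed RTT of 5 ms, stored <<3 */
	int64_t age_ns = 2 * 1000 * 1000; /* head skb was sent 2 ms ago */
	int64_t half_srtt_ns = (int64_t)(NSEC_PER_USEC * (srtt_us >> 4));

	printf("half srtt = %lld ns\n", (long long)half_srtt_ns); /* 2500000 */
	/* age (2 ms) < half srtt (2.5 ms): the ACK for the head skb is
	 * still more than half an RTT away, so send now instead of
	 * deferring, matching the signed comparison in the hunk above.
	 */
	printf("%s\n", age_ns - half_srtt_ns < 0 ? "send now" : "defer");
	return 0;
}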