author     Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>     2007-07-30 19:48:37 -0700
committer  David S. Miller <davem@sunset.davemloft.net>  2007-07-31 02:28:30 -0700
commit     1e757f9996114f713a79d3fbcd08739efcfc5c34
tree       169b4399c718ec6bf178ad1d77d8b8bf27d1d714 /net
parent     [PPPOL2TP]: Add CONFIG_INET Kconfig dependency.
[TCP]: Fix ratehalving with bidirectional flows
Actually, rate halving seems to work too well: cwnd is reduced on every second ACK even when the number of packets in flight remains unchanged. Recoveries in bidirectional flows suffer quite badly because of this; both NewReno and SACK are affected. After this patch, rate halving is performed for an ACK only if the number of packets in flight has supposedly changed as well.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/tcp_input.c	21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4b255fe999d9..41163ddc312c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1851,19 +1851,22 @@ static inline u32 tcp_cwnd_min(const struct sock *sk)
 }
 
 /* Decrease cwnd each second ack. */
-static void tcp_cwnd_down(struct sock *sk)
+static void tcp_cwnd_down(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int decr = tp->snd_cwnd_cnt + 1;
 
-	tp->snd_cwnd_cnt = decr&1;
-	decr >>= 1;
+	if ((flag&FLAG_FORWARD_PROGRESS) ||
+	    (IsReno(tp) && !(flag&FLAG_NOT_DUP))) {
+		tp->snd_cwnd_cnt = decr&1;
+		decr >>= 1;
 
-	if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
-		tp->snd_cwnd -= decr;
+		if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
+			tp->snd_cwnd -= decr;
 
-	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
+		tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
+		tp->snd_cwnd_stamp = tcp_time_stamp;
+	}
 }
 
 /* Nothing was retransmitted or returned timestamp is less
@@ -2060,7 +2063,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
 		}
 		tcp_moderate_cwnd(tp);
 	} else {
-		tcp_cwnd_down(sk);
+		tcp_cwnd_down(sk, flag);
 	}
 }
@@ -2260,7 +2263,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 
 	if (is_dupack || tcp_head_timedout(sk))
 		tcp_update_scoreboard(sk);
-	tcp_cwnd_down(sk);
+	tcp_cwnd_down(sk, flag);
 	tcp_xmit_retransmit_queue(sk);
 }
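
A minimal standalone sketch of the gating behaviour this patch introduces, not code from the kernel tree: struct mini_tp, cwnd_down(), cwnd_min() and the two flag values are simplified stand-ins for the kernel's tcp_sock, tcp_cwnd_down(), tcp_cwnd_min(), FLAG_FORWARD_PROGRESS and FLAG_NOT_DUP. It illustrates that cwnd is now walked down only on ACKs that made forward progress (or on Reno duplicate ACKs), so ACKs generated purely by reverse-direction data leave the window alone.

#include <stdio.h>

#define FLAG_FORWARD_PROGRESS	0x01	/* ACK advanced snd_una or SACKed new data */
#define FLAG_NOT_DUP		0x02	/* ACK is not a pure duplicate */

struct mini_tp {
	unsigned int snd_cwnd;
	unsigned int snd_cwnd_cnt;
	unsigned int packets_in_flight;
	int reno;			/* 1: NewReno-style, 0: SACK */
};

static unsigned int cwnd_min(const struct mini_tp *tp)
{
	(void)tp;
	return 2;			/* stand-in for tcp_cwnd_min() */
}

/* Decrease cwnd on every second eligible ACK, mirroring tcp_cwnd_down(). */
static void cwnd_down(struct mini_tp *tp, int flag)
{
	unsigned int decr = tp->snd_cwnd_cnt + 1;

	if ((flag & FLAG_FORWARD_PROGRESS) ||
	    (tp->reno && !(flag & FLAG_NOT_DUP))) {
		tp->snd_cwnd_cnt = decr & 1;
		decr >>= 1;

		if (decr && tp->snd_cwnd > cwnd_min(tp))
			tp->snd_cwnd -= decr;

		if (tp->snd_cwnd > tp->packets_in_flight + 1)
			tp->snd_cwnd = tp->packets_in_flight + 1;
	}
}

int main(void)
{
	struct mini_tp tp = { .snd_cwnd = 10, .snd_cwnd_cnt = 0,
			      .packets_in_flight = 10, .reno = 0 };
	int i;

	/* ACKs caused only by reverse-direction data: no forward progress,
	 * so with the gate in place cwnd stays put. */
	for (i = 0; i < 4; i++)
		cwnd_down(&tp, FLAG_NOT_DUP);
	printf("after 4 no-progress ACKs: cwnd=%u\n", tp.snd_cwnd);	/* 10 */

	/* ACKs that do advance snd_una: cwnd is cut on every second one. */
	for (i = 0; i < 4; i++)
		cwnd_down(&tp, FLAG_FORWARD_PROGRESS | FLAG_NOT_DUP);
	printf("after 4 progress ACKs:    cwnd=%u\n", tp.snd_cwnd);	/* 8 */

	return 0;
}

Before the patch the body of cwnd_down() ran unconditionally, so the four no-progress ACKs above would already have pulled cwnd down from 10 to 8.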