author		Gerrit Renker <gerrit@erg.abdn.ac.uk>	2008-09-04 07:30:19 +0200
committer	Gerrit Renker <gerrit@erg.abdn.ac.uk>	2008-09-04 07:45:38 +0200
commit		83337dae6ca94d801b6700600244865cd694205b
tree		8d46dcc50324ce3842f94c40a332b9689e488eab /net/dccp/ccids/ccid2.c
parent		dccp: Refine the wait-for-ccid mechanism
dccp ccid-2: Stop polling
This updates CCID2 to use the CCID dequeueing mechanism, converting it from the previous constant polling to an event-driven mechanism.

Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Diffstat (limited to 'net/dccp/ccids/ccid2.c')
-rw-r--r--	net/dccp/ccids/ccid2.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
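The converted send_packet hook in the first hunk below tests a ccid2_cwnd_network_limited() predicate that is defined outside this diff. Judging from the polling check it replaces (pipe vs. cwnd), it is presumably a one-line inline; the following is only a hypothetical sketch:

/* Hypothetical sketch, inferred from the removed check below: the sender
 * counts as network-limited while the packets in flight (pipe) fill the
 * congestion window (cwnd). */
static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hctx)
{
        return hctx->pipe >= hctx->cwnd;
}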
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index bbf16b35734d..c7d83e3c1648 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -123,12 +123,9 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx)
static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
{
- struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
-
- if (hctx->pipe < hctx->cwnd)
- return 0;
-
- return 1; /* XXX CCID should dequeue when ready instead of polling */
+ if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
+ return CCID_PACKET_WILL_DEQUEUE_LATER;
+ return CCID_PACKET_SEND_AT_ONCE;
}
static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
@@ -168,6 +165,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
{
struct sock *sk = (struct sock *)data;
struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
+ const bool sender_was_blocked = ccid2_cwnd_network_limited(hctx);
long s;
bh_lock_sock(sk);
@@ -187,8 +185,6 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
if (s > 60)
hctx->rto = 60 * HZ;
- ccid2_start_rto_timer(sk);
-
/* adjust pipe, cwnd etc */
hctx->ssthresh = hctx->cwnd / 2;
if (hctx->ssthresh < 2)
@@ -205,6 +201,11 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
hctx->rpdupack = -1;
ccid2_change_l_ack_ratio(sk, 1);
ccid2_hc_tx_check_sanity(hctx);
+
+ /* if we were blocked before, we may now send cwnd=1 packet */
+ if (sender_was_blocked)
+ tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+ ccid2_start_rto_timer(sk);
out:
bh_unlock_sock(sk);
sock_put(sk);
@@ -455,6 +456,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
struct dccp_sock *dp = dccp_sk(sk);
struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
+ const bool sender_was_blocked = ccid2_cwnd_network_limited(hctx);
struct dccp_ackvec_parsed *avp;
u64 ackno, seqno;
struct ccid2_seq *seqp;
@@ -640,6 +642,9 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
ccid2_hc_tx_check_sanity(hctx);
done:
+ /* check if incoming Acks allow pending packets to be sent */
+ if (sender_was_blocked && !ccid2_cwnd_network_limited(hctx))
+ tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
dccp_ackvec_parsed_cleanup(&hctx->av_chunks);
}
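Both call sites follow the same pattern: record whether the congestion window was full on entry, perform the bookkeeping that may free window space, and schedule dccps_xmitlet only on the blocked-to-unblocked transition. As a minimal sketch, assuming the tasklet handler simply re-enters the DCCP output path (the handler name and the dccp_write_xmit() call are assumptions, not part of this patch):

/* Sketch only: runs in softirq context when a CCID signals that it may
 * dequeue again, so queued packets go out as soon as the window opens
 * instead of being discovered by polling. */
static void dccp_write_xmitlet(unsigned long data)
{
        struct sock *sk = (struct sock *)data;

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                /* socket is busy in process context; retry via timer */
                sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
        else
                dccp_write_xmit(sk);    /* send what the CCID now permits */
        bh_unlock_sock(sk);
}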