path: root/net/sctp/socket.c
author    Vladislav Yasevich <vladislav.yasevich@hp.com>    2006-05-19 11:01:18 -0700
committer Sridhar Samudrala <sri@us.ibm.com>    2006-05-19 11:01:18 -0700
commit    61c9fed41638249f8b6ca5345064eb1beb50179f (patch)
tree      8855a0e6cbee58a9d94e30396b7d0a1baa526900    /net/sctp/socket.c
parent    [SCTP]: Set sk_err so that poll wakes up after a non-blocking connect failure. (diff)
[SCTP]: A better solution to fix the race between sctp_peeloff() and sctp_rcv().

The goal is to hold the ref on the association/endpoint throughout the
state-machine process.  We accomplish it like this:

    /* ref on the assoc/ep is taken during lookup */

    if owned_by_user(sk)
        sctp_add_backlog(skb, sk);
    else
        inqueue_push(skb, sk);

    /* drop the ref on the assoc/ep */

However, in sctp_add_backlog() we take the ref on the assoc/ep and hold it
while the skb is on the backlog queue.  This allows us to get rid of the
sock_hold/sock_put in the lookup routines.

Now sctp_backlog_rcv() needs to account for a potential association move.
In the unlikely event that the association moved, we need to retest whether
the new socket is locked by the user.  If we don't do this, we may have two
packets racing up the stack toward the same socket and we can't deal with
that.  If the new socket is still locked, we'll just add the skb to its
backlog, continuing to hold the ref on the association.  This gets rid of
the need to move packets from one backlog to another, and it is also safe
in case new packets arrive on the same backlog queue.

The last step is to lock the new socket when we are moving the association
to it.  This is needed in case any new packets arrive on the association
while it is being moved.  We want these to go to the backlog, since we would
like to avoid the race between this new packet and a packet that may be
sitting on the backlog queue of the old socket toward the same association.

Signed-off-by: Vladislav Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
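For illustration, the dispatch described by the pseudocode above can be
rendered as a small standalone sketch.  This is not the committed code
(which lives in net/sctp/input.c): sock_owned_by_user(), sk_add_backlog(),
sctp_inq_push(), sctp_association_hold() and sctp_association_put() are
real kernel primitives, but the wrapper function, its signature, and the
association-only handling here are simplifying assumptions.

    /* Illustrative sketch only: the lookup ref on the assoc is held across
     * the dispatch, the backlog path takes its own ref, and the lookup ref
     * is dropped afterwards.
     */
    static void sctp_rcv_dispatch_sketch(struct sock *sk, struct sk_buff *skb,
                                         struct sctp_association *asoc,
                                         struct sctp_chunk *chunk)
    {
        /* ref on the assoc was taken during lookup */

        if (sock_owned_by_user(sk)) {
            /* Socket busy: take a ref that lives as long as the skb sits
             * on the backlog, then park the skb there.
             */
            sctp_association_hold(asoc);
            sk_add_backlog(sk, skb);
        } else {
            /* Socket free: push the chunk into the inqueue for immediate
             * state-machine processing ("inqueue_push" in the pseudocode
             * above).
             */
            sctp_inq_push(&asoc->base.inqueue, chunk);
        }

        /* drop the lookup ref; the backlog (if used) holds its own */
        sctp_association_put(asoc);
    }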
Diffstat (limited to 'net/sctp/socket.c')
-rw-r--r--    net/sctp/socket.c    16
1 file changed, 10 insertions, 6 deletions
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 90863307bcd9..b1a17758003a 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1229,7 +1229,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
ep = sctp_sk(sk)->ep;
- /* Walk all associations on a socket, not on an endpoint. */
+ /* Walk all associations on an endpoint. */
list_for_each_safe(pos, temp, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
@@ -5318,6 +5318,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
*/
sctp_release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
+ BUG_ON(sk != asoc->base.sk);
sctp_lock_sock(sk);
*timeo_p = current_timeo;
@@ -5605,12 +5606,14 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
*/
newsp->type = type;
- spin_lock_bh(&oldsk->sk_lock.slock);
- /* Migrate the backlog from oldsk to newsk. */
- sctp_backlog_migrate(assoc, oldsk, newsk);
- /* Migrate the association to the new socket. */
+ /* Mark the new socket "in-use" by the user so that any packets
+ * that may arrive on the association after we've moved it are
+ * queued to the backlog. This prevents a potential race between
+ * backlog processing on the old socket and new-packet processing
+ * on the new socket.
+ */
+ sctp_lock_sock(newsk);
sctp_assoc_migrate(assoc, newsk);
- spin_unlock_bh(&oldsk->sk_lock.slock);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
@@ -5619,6 +5622,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
newsk->sk_shutdown |= RCV_SHUTDOWN;
newsk->sk_state = SCTP_SS_ESTABLISHED;
+ sctp_release_sock(newsk);
}
/* This proto struct describes the ULP interface for SCTP. */
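The sctp_backlog_rcv() rework mentioned in the commit message lives in
net/sctp/input.c and therefore does not appear in this socket.c-limited
diff.  A rough sketch of the re-check it describes follows; locking of the
new socket is elided, and the function shape and the way the association is
passed in are assumptions, not the committed code.

    /* Rough sketch of the association-move re-check described in the commit
     * message; not the committed code.
     */
    static int sctp_backlog_rcv_sketch(struct sock *sk, struct sk_buff *skb,
                                       struct sctp_association *asoc)
    {
        struct sock *cur_sk = asoc->base.sk;

        if (unlikely(cur_sk != sk)) {
            /* The association was peeled off to a new socket while this
             * skb sat on the old socket's backlog.
             */
            if (sock_owned_by_user(cur_sk)) {
                /* New socket is busy: requeue on *its* backlog and keep
                 * holding the assoc ref taken when the skb was first
                 * backlogged.  No backlog-to-backlog migration is needed.
                 */
                sk_add_backlog(cur_sk, skb);
                return 0;
            }
            /* New socket is free: process against it instead. */
            sk = cur_sk;
        }

        /* ... normal state-machine processing against sk, followed by
         * dropping the assoc ref taken in the backlog-add path ...
         */
        return 0;
    }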