author	Sridhar Samudrala <sri@us.ibm.com>	2006-01-17 11:56:26 -0800
committer	Sridhar Samudrala <sri@us.ibm.com>	2006-01-17 11:56:26 -0800
commit	c4d2444e992c4eda1d7fc3287e93ba58295bf6b9 (patch)
tree	04f2096c141ede308356bd2d8277d4c291fae24d /net/sctp
parent	[SCTP]: Fix machine check/connection hang on IA64. (diff)
[SCTP]: Fix a couple of races between sctp_peeloff() and sctp_rcv().
Validate and update the sk in sctp_rcv() to avoid the race where an assoc/ep
could move to a different socket after we get the sk, but before the skb is
added to the backlog.

Also migrate the skbs in the backlog queue to the new sk when doing a peeloff.

Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
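The sctp_rcv() half of the fix is a hand-over-hand lock transfer: if the receiver was re-parented by a peeloff after the lookup, the new socket's lock is taken before the old one is dropped, so there is no window in which the receiver is unlocked. Below is a minimal user-space sketch of the same pattern, not the kernel code itself: it uses pthread mutexes and hypothetical fake_sock/fake_rcvr types in place of struct sock, the assoc/ep, and sctp_bh_lock_sock()/sctp_bh_unlock_sock().

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's sock and assoc/ep types. */
struct fake_sock {
	pthread_mutex_t lock;
};

struct fake_rcvr {
	struct fake_sock *sk;	/* socket currently owning this receiver */
};

/*
 * Hand-over-hand lock transfer: if the receiver moved to another socket
 * after the caller looked it up, take the new owner's lock before dropping
 * the old one, so the receiver is never left unlocked in between.  This
 * mirrors the sk != rcvr->sk check added to sctp_rcv().
 */
static struct fake_sock *lock_current_owner(struct fake_rcvr *rcvr,
					    struct fake_sock *sk)
{
	pthread_mutex_lock(&sk->lock);
	if (sk != rcvr->sk) {
		pthread_mutex_lock(&rcvr->sk->lock);
		pthread_mutex_unlock(&sk->lock);
		sk = rcvr->sk;
	}
	return sk;	/* caller unlocks the returned sock's lock */
}

int main(void)
{
	struct fake_sock oldsk = { PTHREAD_MUTEX_INITIALIZER };
	struct fake_sock newsk = { PTHREAD_MUTEX_INITIALIZER };
	struct fake_rcvr rcvr = { &newsk };	/* already peeled off to newsk */
	struct fake_sock *sk;

	sk = lock_current_owner(&rcvr, &oldsk);
	printf("locked the %s socket\n", sk == &newsk ? "new" : "old");
	pthread_mutex_unlock(&sk->lock);
	return 0;
}

The check only works because both sides contend on the old socket's lock: the peeloff path in the socket.c hunk below takes spin_lock_bh() on oldsk->sk_lock.slock around the migration, which is the same spinlock sctp_bh_lock_sock() takes here.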
Diffstat (limited to 'net/sctp')
-rw-r--r--	net/sctp/input.c	35
-rw-r--r--	net/sctp/socket.c	4
2 files changed, 38 insertions(+), 1 deletion(-)
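The sctp_backlog_migrate() half detaches the old socket's entire backlog and requeues each skb on whichever socket now owns the chunk's receiver. Here is a stand-alone sketch of that detach-and-redistribute walk, with hypothetical node/queue types standing in for sk_buff and sk->sk_backlog so it compiles outside the kernel.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical queue node standing in for an skb whose control block
 * records which receiver (assoc/ep) the queued chunk belongs to. */
struct node {
	struct node *next;
	int owner;		/* stands in for SCTP_INPUT_CB(skb)->chunk->rcvr */
};

/* Hypothetical head/tail backlog, shaped like sk->sk_backlog. */
struct queue {
	struct node *head;
	struct node *tail;
};

static void enqueue(struct queue *q, struct node *n)
{
	n->next = NULL;
	if (q->tail)
		q->tail->next = n;
	else
		q->head = n;
	q->tail = n;
}

/*
 * Detach the whole old backlog, then requeue each node on the new queue if
 * it belongs to the peeled-off owner, or back on the old queue otherwise --
 * the same walk sctp_backlog_migrate() does over oldsk->sk_backlog.
 */
static void migrate(struct queue *oldq, struct queue *newq, int moved_owner)
{
	struct node *n = oldq->head;

	oldq->head = oldq->tail = NULL;
	while (n != NULL) {
		struct node *next = n->next;

		n->next = NULL;
		enqueue(n->owner == moved_owner ? newq : oldq, n);
		n = next;
	}
}

int main(void)
{
	struct node a = { NULL, 1 }, b = { NULL, 2 }, c = { NULL, 1 };
	struct queue oldq = { NULL, NULL }, newq = { NULL, NULL };
	struct node *n;

	enqueue(&oldq, &a);
	enqueue(&oldq, &b);
	enqueue(&oldq, &c);
	migrate(&oldq, &newq, 1);	/* owner 1 was peeled off */

	for (n = newq.head; n; n = n->next)
		printf("new queue got a node owned by %d\n", n->owner);
	return 0;
}

In the patch the walk runs with the old socket's slock held (taken in sctp_sock_migrate()), so sctp_rcv() cannot backlog new skbs on oldsk while the list is detached.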
diff --git a/net/sctp/input.c b/net/sctp/input.c
index c463e4049c52..71fd56375641 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -257,12 +257,21 @@ int sctp_rcv(struct sk_buff *skb)
 	 */
 	sctp_bh_lock_sock(sk);
 
+	/* It is possible that the association could have moved to a different
+	 * socket if it is peeled off. If so, update the sk.
+	 */
+	if (sk != rcvr->sk) {
+		sctp_bh_lock_sock(rcvr->sk);
+		sctp_bh_unlock_sock(sk);
+		sk = rcvr->sk;
+	}
+
 	if (sock_owned_by_user(sk))
 		sk_add_backlog(sk, skb);
 	else
 		sctp_backlog_rcv(sk, skb);
 
-	/* Release the sock and the sock ref we took in the lookup calls.
+	/* Release the sock and the sock ref we took in the lookup calls.
 	 * The asoc/ep ref will be released in sctp_backlog_rcv.
 	 */
 	sctp_bh_unlock_sock(sk);
@@ -297,6 +306,9 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	struct sctp_ep_common *rcvr = NULL;
 
 	rcvr = chunk->rcvr;
+
+	BUG_TRAP(rcvr->sk == sk);
+
 	if (rcvr->dead) {
 		sctp_chunk_free(chunk);
 	} else {
@@ -313,6 +325,27 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 }
 
+void sctp_backlog_migrate(struct sctp_association *assoc,
+			  struct sock *oldsk, struct sock *newsk)
+{
+	struct sk_buff *skb;
+	struct sctp_chunk *chunk;
+
+	skb = oldsk->sk_backlog.head;
+	oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
+	while (skb != NULL) {
+		struct sk_buff *next = skb->next;
+
+		chunk = SCTP_INPUT_CB(skb)->chunk;
+		skb->next = NULL;
+		if (&assoc->base == chunk->rcvr)
+			sk_add_backlog(newsk, skb);
+		else
+			sk_add_backlog(oldsk, skb);
+		skb = next;
+	}
+}
+
 /* Handle icmp frag needed error. */
 void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
 			   struct sctp_transport *t, __u32 pmtu)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6a0b1af89932..fb1821d9f338 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5602,8 +5602,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	 */
 	newsp->type = type;
 
+	spin_lock_bh(&oldsk->sk_lock.slock);
+	/* Migrate the backlog from oldsk to newsk. */
+	sctp_backlog_migrate(assoc, oldsk, newsk);
 	/* Migrate the association to the new socket. */
 	sctp_assoc_migrate(assoc, newsk);
+	spin_unlock_bh(&oldsk->sk_lock.slock);
 
 	/* If the association on the newsk is already closed before accept()
 	 * is called, set RCV_SHUTDOWN flag.