Diffstat (limited to 'net/sctp')
-rw-r--r--   net/sctp/associola.c       27
-rw-r--r--   net/sctp/bind_addr.c        8
-rw-r--r--   net/sctp/endpointola.c     11
-rw-r--r--   net/sctp/ipv6.c             3
-rw-r--r--   net/sctp/outqueue.c         9
-rw-r--r--   net/sctp/protocol.c         7
-rw-r--r--   net/sctp/sm_make_chunk.c   44
-rw-r--r--   net/sctp/sm_sideeffect.c   12
-rw-r--r--   net/sctp/sm_statefuns.c    28
-rw-r--r--   net/sctp/socket.c          84
-rw-r--r--   net/sctp/transport.c        9
11 files changed, 156 insertions, 86 deletions
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 9d05e13e92f6..27329ce9c311 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -441,7 +441,8 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
/* If the primary path is changing, assume that the
* user wants to use this new path.
*/
- if (transport->state != SCTP_INACTIVE)
+ if ((transport->state == SCTP_ACTIVE) ||
+ (transport->state == SCTP_UNKNOWN))
asoc->peer.active_path = transport;
/*
@@ -532,11 +533,11 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
port = addr->v4.sin_port;
SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
- " port: %d state:%s\n",
+ " port: %d state:%d\n",
asoc,
addr,
addr->v4.sin_port,
- peer_state == SCTP_UNKNOWN?"UNKNOWN":"ACTIVE");
+ peer_state);
/* Set the port if it has not been set yet. */
if (0 == asoc->peer.port)
@@ -545,9 +546,12 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
/* Check to see if this is a duplicate. */
peer = sctp_assoc_lookup_paddr(asoc, addr);
if (peer) {
- if (peer_state == SCTP_ACTIVE &&
- peer->state == SCTP_UNKNOWN)
- peer->state = SCTP_ACTIVE;
+ if (peer->state == SCTP_UNKNOWN) {
+ if (peer_state == SCTP_ACTIVE)
+ peer->state = SCTP_ACTIVE;
+ if (peer_state == SCTP_UNCONFIRMED)
+ peer->state = SCTP_UNCONFIRMED;
+ }
return peer;
}
@@ -739,7 +743,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
list_for_each(pos, &asoc->peer.transport_addr_list) {
t = list_entry(pos, struct sctp_transport, transports);
- if (t->state == SCTP_INACTIVE)
+ if ((t->state == SCTP_INACTIVE) ||
+ (t->state == SCTP_UNCONFIRMED))
continue;
if (!first || t->last_time_heard > first->last_time_heard) {
second = first;
@@ -759,7 +764,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
* [If the primary is active but not most recent, bump the most
* recently used transport.]
*/
- if (asoc->peer.primary_path->state != SCTP_INACTIVE &&
+ if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
+ (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
first != asoc->peer.primary_path) {
second = first;
first = asoc->peer.primary_path;
@@ -1054,7 +1060,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
transports);
if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
sctp_assoc_add_peer(asoc, &trans->ipaddr,
- GFP_ATOMIC, SCTP_ACTIVE);
+ GFP_ATOMIC, trans->state);
}
asoc->ctsn_ack_point = asoc->next_tsn - 1;
@@ -1094,7 +1100,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
/* Try to find an active transport. */
- if (t->state != SCTP_INACTIVE) {
+ if ((t->state == SCTP_ACTIVE) ||
+ (t->state == SCTP_UNKNOWN)) {
break;
} else {
/* Keep track of the next transport in case
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 2b962627f631..2b9c12a170e5 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -146,7 +146,7 @@ void sctp_bind_addr_free(struct sctp_bind_addr *bp)
/* Add an address to the bind address list in the SCTP_bind_addr structure. */
int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
- gfp_t gfp)
+ __u8 use_as_src, gfp_t gfp)
{
struct sctp_sockaddr_entry *addr;
@@ -163,6 +163,8 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
if (!addr->a.v4.sin_port)
addr->a.v4.sin_port = bp->port;
+ addr->use_as_src = use_as_src;
+
INIT_LIST_HEAD(&addr->list);
list_add_tail(&addr->list, &bp->address_list);
SCTP_DBG_OBJCNT_INC(addr);
@@ -274,7 +276,7 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
}
af->from_addr_param(&addr, rawaddr, port, 0);
- retval = sctp_add_bind_addr(bp, &addr, gfp);
+ retval = sctp_add_bind_addr(bp, &addr, 1, gfp);
if (retval) {
/* Can't finish building the list, clean up. */
sctp_bind_addr_clean(bp);
@@ -367,7 +369,7 @@ static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
(((AF_INET6 == addr->sa.sa_family) &&
(flags & SCTP_ADDR6_ALLOWED) &&
(flags & SCTP_ADDR6_PEERSUPP))))
- error = sctp_add_bind_addr(dest, addr, gfp);
+ error = sctp_add_bind_addr(dest, addr, 1, gfp);
}
return error;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 67bd53070ee0..ffda1d680529 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -158,6 +158,12 @@ void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
void sctp_endpoint_free(struct sctp_endpoint *ep)
{
ep->base.dead = 1;
+
+ ep->base.sk->sk_state = SCTP_SS_CLOSED;
+
+ /* Unlink this endpoint, so we can't find it again! */
+ sctp_unhash_endpoint(ep);
+
sctp_endpoint_put(ep);
}
@@ -166,11 +172,6 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
{
SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
- ep->base.sk->sk_state = SCTP_SS_CLOSED;
-
- /* Unlink this endpoint, so we can't find it again! */
- sctp_unhash_endpoint(ep);
-
/* Free up the HMAC transform. */
sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 8ef08070c8b6..99c0cefc04e0 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -290,7 +290,8 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
sctp_read_lock(addr_lock);
list_for_each(pos, &bp->address_list) {
laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
- if ((laddr->a.sa.sa_family == AF_INET6) &&
+ if ((laddr->use_as_src) &&
+ (laddr->a.sa.sa_family == AF_INET6) &&
(scope <= sctp_scope(&laddr->a))) {
bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
if (!baddr || (matchlen < bmatchlen)) {
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index e5faa351aaad..30b710c54e64 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -691,7 +691,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
if (!new_transport) {
new_transport = asoc->peer.active_path;
- } else if (new_transport->state == SCTP_INACTIVE) {
+ } else if ((new_transport->state == SCTP_INACTIVE) ||
+ (new_transport->state == SCTP_UNCONFIRMED)) {
/* If the chunk is Heartbeat or Heartbeat Ack,
* send it to chunk->transport, even if it's
* inactive.
@@ -848,7 +849,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
*/
new_transport = chunk->transport;
if (!new_transport ||
- new_transport->state == SCTP_INACTIVE)
+ ((new_transport->state == SCTP_INACTIVE) ||
+ (new_transport->state == SCTP_UNCONFIRMED)))
new_transport = asoc->peer.active_path;
/* Change packets if necessary. */
@@ -1464,7 +1466,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
/* Mark the destination transport address as
* active if it is not so marked.
*/
- if (transport->state == SCTP_INACTIVE) {
+ if ((transport->state == SCTP_INACTIVE) ||
+ (transport->state == SCTP_UNCONFIRMED)) {
sctp_assoc_control_transport(
transport->asoc,
transport,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 816c033d7886..1ab03a27a76e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -240,7 +240,7 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
(((AF_INET6 == addr->a.sa.sa_family) &&
(copy_flags & SCTP_ADDR6_ALLOWED) &&
(copy_flags & SCTP_ADDR6_PEERSUPP)))) {
- error = sctp_add_bind_addr(bp, &addr->a,
+ error = sctp_add_bind_addr(bp, &addr->a, 1,
GFP_ATOMIC);
if (error)
goto end_copy;
@@ -486,6 +486,8 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
list_for_each(pos, &bp->address_list) {
laddr = list_entry(pos, struct sctp_sockaddr_entry,
list);
+ if (!laddr->use_as_src)
+ continue;
sctp_v4_dst_saddr(&dst_saddr, dst, bp->port);
if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
goto out_unlock;
@@ -506,7 +508,8 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
list_for_each(pos, &bp->address_list) {
laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
- if (AF_INET == laddr->a.sa.sa_family) {
+ if ((laddr->use_as_src) &&
+ (AF_INET == laddr->a.sa.sa_family)) {
fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
if (!ip_route_output_key(&rt, &fl)) {
dst = &rt->u.dst;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 2a8773691695..17b509282cf2 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -806,38 +806,26 @@ no_mem:
/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */
struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
- const struct sctp_chunk *chunk,
- const struct msghdr *msg)
+ const struct msghdr *msg,
+ size_t paylen)
{
struct sctp_chunk *retval;
- void *payload = NULL, *payoff;
- size_t paylen = 0;
- struct iovec *iov = NULL;
- int iovlen = 0;
-
- if (msg) {
- iov = msg->msg_iov;
- iovlen = msg->msg_iovlen;
- paylen = get_user_iov_size(iov, iovlen);
- }
+ void *payload = NULL;
+ int err;
- retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen);
+ retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen);
if (!retval)
goto err_chunk;
if (paylen) {
/* Put the msg_iov together into payload. */
- payload = kmalloc(paylen, GFP_ATOMIC);
+ payload = kmalloc(paylen, GFP_KERNEL);
if (!payload)
goto err_payload;
- payoff = payload;
- for (; iovlen > 0; --iovlen) {
- if (copy_from_user(payoff, iov->iov_base,iov->iov_len))
- goto err_copy;
- payoff += iov->iov_len;
- iov++;
- }
+ err = memcpy_fromiovec(payload, msg->msg_iov, paylen);
+ if (err < 0)
+ goto err_copy;
}
sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
@@ -1493,7 +1481,7 @@ no_hmac:
/* Also, add the destination address. */
if (list_empty(&retval->base.bind_addr.address_list)) {
- sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest,
+ sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1,
GFP_ATOMIC);
}
@@ -2017,7 +2005,7 @@ static int sctp_process_param(struct sctp_association *asoc,
af->from_addr_param(&addr, param.addr, asoc->peer.port, 0);
scope = sctp_scope(peer_addr);
if (sctp_in_scope(&addr, scope))
- if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_ACTIVE))
+ if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
return 0;
break;
@@ -2418,7 +2406,7 @@ static __u16 sctp_process_asconf_param(struct sctp_association *asoc,
* Due to Resource Shortage'.
*/
- peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_ACTIVE);
+ peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);
if (!peer)
return SCTP_ERROR_RSRC_LOW;
@@ -2565,6 +2553,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
union sctp_addr_param *addr_param;
struct list_head *pos;
struct sctp_transport *transport;
+ struct sctp_sockaddr_entry *saddr;
int retval = 0;
addr_param = (union sctp_addr_param *)
@@ -2578,7 +2567,11 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
case SCTP_PARAM_ADD_IP:
sctp_local_bh_disable();
sctp_write_lock(&asoc->base.addr_lock);
- retval = sctp_add_bind_addr(bp, &addr, GFP_ATOMIC);
+ list_for_each(pos, &bp->address_list) {
+ saddr = list_entry(pos, struct sctp_sockaddr_entry, list);
+ if (sctp_cmp_addr_exact(&saddr->a, &addr))
+ saddr->use_as_src = 1;
+ }
sctp_write_unlock(&asoc->base.addr_lock);
sctp_local_bh_enable();
break;
@@ -2591,6 +2584,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
list_for_each(pos, &asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport,
transports);
+ dst_release(transport->dst);
sctp_transport_route(transport, NULL,
sctp_sk(asoc->base.sk));
}
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index c5beb2ad7ef7..9c10bdec1afe 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -430,7 +430,11 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
/* The check for association's overall error counter exceeding the
* threshold is done in the state function.
*/
- asoc->overall_error_count++;
+ /* When probing UNCONFIRMED addresses, the association overall
+ * error count is NOT incremented
+ */
+ if (transport->state != SCTP_UNCONFIRMED)
+ asoc->overall_error_count++;
if (transport->state != SCTP_INACTIVE &&
(transport->error_count++ >= transport->pathmaxrxt)) {
@@ -610,7 +614,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
/* Mark the destination transport address as active if it is not so
* marked.
*/
- if (t->state == SCTP_INACTIVE)
+ if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED))
sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
SCTP_HEARTBEAT_SUCCESS);
@@ -620,6 +624,10 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
*/
hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
+
+ /* Update the heartbeat timer. */
+ if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
+ sctp_transport_hold(t);
}
/* Helper function to do a transport reset at the expiry of the hearbeat
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 9e58144f4851..5b5ae7958322 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -846,6 +846,7 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
hbinfo.daddr = transport->ipaddr;
hbinfo.sent_at = jiffies;
+ hbinfo.hb_nonce = transport->hb_nonce;
/* Send a heartbeat to our peer. */
paylen = sizeof(sctp_sender_hb_info_t);
@@ -1048,6 +1049,10 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
return SCTP_DISPOSITION_DISCARD;
}
+ /* Validate the 64-bit random nonce. */
+ if (hbinfo->hb_nonce != link->hb_nonce)
+ return SCTP_DISPOSITION_DISCARD;
+
max_interval = link->hbinterval + link->rto;
/* Check if the timestamp looks valid. */
@@ -4026,18 +4031,12 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
* from its upper layer, but retransmits data to the far end
* if necessary to fill gaps.
*/
- struct msghdr *msg = arg;
- struct sctp_chunk *abort;
+ struct sctp_chunk *abort = arg;
sctp_disposition_t retval;
retval = SCTP_DISPOSITION_CONSUME;
- /* Generate ABORT chunk to send the peer. */
- abort = sctp_make_abort_user(asoc, NULL, msg);
- if (!abort)
- retval = SCTP_DISPOSITION_NOMEM;
- else
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
/* Even if we can't send the ABORT due to low memory delete the
* TCB. This is a departure from our typical NOMEM handling.
@@ -4161,8 +4160,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
void *arg,
sctp_cmd_seq_t *commands)
{
- struct msghdr *msg = arg;
- struct sctp_chunk *abort;
+ struct sctp_chunk *abort = arg;
sctp_disposition_t retval;
/* Stop T1-init timer */
@@ -4170,12 +4168,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
retval = SCTP_DISPOSITION_CONSUME;
- /* Generate ABORT chunk to send the peer */
- abort = sctp_make_abort_user(asoc, NULL, msg);
- if (!abort)
- retval = SCTP_DISPOSITION_NOMEM;
- else
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
@@ -5278,7 +5271,6 @@ static int sctp_eat_data(const struct sctp_association *asoc,
datalen -= sizeof(sctp_data_chunk_t);
deliver = SCTP_CMD_CHUNK_ULP;
- chunk->data_accepted = 1;
/* Think about partial delivery. */
if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
@@ -5357,6 +5349,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
if (SCTP_CMD_CHUNK_ULP == deliver)
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
+ chunk->data_accepted = 1;
+
/* Note: Some chunks may get overcounted (if we drop) or overcounted
* if we renege and the chunk arrives again.
*/
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0a2c71d0d8aa..fde3f55bfd4b 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -369,7 +369,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
/* Use GFP_ATOMIC since BHs are disabled. */
addr->v4.sin_port = ntohs(addr->v4.sin_port);
- ret = sctp_add_bind_addr(bp, addr, GFP_ATOMIC);
+ ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC);
addr->v4.sin_port = htons(addr->v4.sin_port);
sctp_write_unlock(&ep->base.addr_lock);
sctp_local_bh_enable();
@@ -491,6 +491,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
struct sctp_chunk *chunk;
struct sctp_sockaddr_entry *laddr;
union sctp_addr *addr;
+ union sctp_addr saveaddr;
void *addr_buf;
struct sctp_af *af;
struct list_head *pos;
@@ -558,14 +559,26 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
}
retval = sctp_send_asconf(asoc, chunk);
+ if (retval)
+ goto out;
- /* FIXME: After sending the add address ASCONF chunk, we
- * cannot append the address to the association's binding
- * address list, because the new address may be used as the
- * source of a message sent to the peer before the ASCONF
- * chunk is received by the peer. So we should wait until
- * ASCONF_ACK is received.
+ /* Add the new addresses to the bind address list with
+ * use_as_src set to 0.
*/
+ sctp_local_bh_disable();
+ sctp_write_lock(&asoc->base.addr_lock);
+ addr_buf = addrs;
+ for (i = 0; i < addrcnt; i++) {
+ addr = (union sctp_addr *)addr_buf;
+ af = sctp_get_af_specific(addr->v4.sin_family);
+ memcpy(&saveaddr, addr, af->sockaddr_len);
+ saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
+ retval = sctp_add_bind_addr(bp, &saveaddr, 0,
+ GFP_ATOMIC);
+ addr_buf += af->sockaddr_len;
+ }
+ sctp_write_unlock(&asoc->base.addr_lock);
+ sctp_local_bh_enable();
}
out:
@@ -676,12 +689,15 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc;
+ struct sctp_transport *transport;
struct sctp_bind_addr *bp;
struct sctp_chunk *chunk;
union sctp_addr *laddr;
+ union sctp_addr saveaddr;
void *addr_buf;
struct sctp_af *af;
- struct list_head *pos;
+ struct list_head *pos, *pos1;
+ struct sctp_sockaddr_entry *saddr;
int i;
int retval = 0;
@@ -748,14 +764,42 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
goto out;
}
- retval = sctp_send_asconf(asoc, chunk);
+ /* Reset use_as_src flag for the addresses in the bind address
+ * list that are to be deleted.
+ */
+ sctp_local_bh_disable();
+ sctp_write_lock(&asoc->base.addr_lock);
+ addr_buf = addrs;
+ for (i = 0; i < addrcnt; i++) {
+ laddr = (union sctp_addr *)addr_buf;
+ af = sctp_get_af_specific(laddr->v4.sin_family);
+ memcpy(&saveaddr, laddr, af->sockaddr_len);
+ saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
+ list_for_each(pos1, &bp->address_list) {
+ saddr = list_entry(pos1,
+ struct sctp_sockaddr_entry,
+ list);
+ if (sctp_cmp_addr_exact(&saddr->a, &saveaddr))
+ saddr->use_as_src = 0;
+ }
+ addr_buf += af->sockaddr_len;
+ }
+ sctp_write_unlock(&asoc->base.addr_lock);
+ sctp_local_bh_enable();
- /* FIXME: After sending the delete address ASCONF chunk, we
- * cannot remove the addresses from the association's bind
- * address list, because there maybe some packet send to
- * the delete addresses, so we should wait until ASCONF_ACK
- * packet is received.
+ /* Update the route and saddr entries for all the transports
+ * as some of the addresses in the bind address list are
+ * about to be deleted and cannot be used as source addresses.
*/
+ list_for_each(pos1, &asoc->peer.transport_addr_list) {
+ transport = list_entry(pos1, struct sctp_transport,
+ transports);
+ dst_release(transport->dst);
+ sctp_transport_route(transport, NULL,
+ sctp_sk(asoc->base.sk));
+ }
+
+ retval = sctp_send_asconf(asoc, chunk);
}
out:
return retval;
@@ -1476,8 +1520,16 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_unlock;
}
if (sinfo_flags & SCTP_ABORT) {
+ struct sctp_chunk *chunk;
+
+ chunk = sctp_make_abort_user(asoc, msg, msg_len);
+ if (!chunk) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc);
- sctp_primitive_ABORT(asoc, msg);
+ sctp_primitive_ABORT(asoc, chunk);
err = 0;
goto out_unlock;
}
@@ -4977,7 +5029,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
/* Caller must hold hashbucket lock for this tb with local BH disabled */
static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
{
- if (hlist_empty(&pp->owner)) {
+ if (pp && hlist_empty(&pp->owner)) {
if (pp->next)
pp->next->pprev = pp->pprev;
*(pp->pprev) = pp->next;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 160f62ad1cc5..2763aa93de1a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -49,6 +49,7 @@
*/
#include <linux/types.h>
+#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
@@ -85,7 +86,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
peer->init_sent_count = 0;
- peer->state = SCTP_ACTIVE;
peer->param_flags = SPP_HB_DISABLE |
SPP_PMTUD_ENABLE |
SPP_SACKDELAY_ENABLE;
@@ -109,6 +109,9 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
peer->hb_timer.function = sctp_generate_heartbeat_event;
peer->hb_timer.data = (unsigned long)peer;
+ /* Initialize the 64-bit random nonce sent with heartbeat. */
+ get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
+
atomic_set(&peer->refcnt, 1);
peer->dead = 0;
@@ -517,7 +520,9 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
unsigned long sctp_transport_timeout(struct sctp_transport *t)
{
unsigned long timeout;
- timeout = t->hbinterval + t->rto + sctp_jitter(t->rto);
+ timeout = t->rto + sctp_jitter(t->rto);
+ if (t->state != SCTP_UNCONFIRMED)
+ timeout += t->hbinterval;
timeout += jiffies;
return timeout;
}