Diffstat (limited to 'net/sctp/input.c')
-rw-r--r--  net/sctp/input.c | 183
1 file changed, 105 insertions(+), 78 deletions(-)
diff --git a/net/sctp/input.c b/net/sctp/input.c
index efaaefc3bb1c..4f43afa8678f 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -92,6 +92,7 @@ int sctp_rcv(struct sk_buff *skb)
struct sctp_chunk *chunk;
union sctp_addr src;
union sctp_addr dest;
+ int bound_dev_if;
int family;
struct sctp_af *af;
struct net *net = dev_net(skb->dev);
@@ -169,7 +170,8 @@ int sctp_rcv(struct sk_buff *skb)
* If a frame arrives on an interface and the receiving socket is
* bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
*/
- if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
+ bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
+ if (bound_dev_if && (bound_dev_if != af->skb_iif(skb))) {
if (transport) {
sctp_transport_put(transport);
asoc = NULL;
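The SO_BINDTODEVICE check above now reads sk->sk_bound_dev_if once into a local variable via READ_ONCE(), so a concurrent setsockopt() cannot change the value between the non-zero test and the interface comparison, and the compiler is not free to reload the field. A minimal user-space sketch of the same single-snapshot pattern, with simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() (illustrative only, not taken from the kernel tree):

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(). */
#define READ_ONCE(x)     (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static int bound_dev_if;	/* written by a "setsockopt" thread, read by the rx path */

static void *rebind(void *arg)
{
	WRITE_ONCE(bound_dev_if, 3);	/* e.g. SO_BINDTODEVICE moving the socket */
	return NULL;
}

/* Accept the packet only if the socket is unbound or bound to skb_iif. */
static int iif_matches(int skb_iif)
{
	int dev = READ_ONCE(bound_dev_if);	/* snapshot once, use twice */

	return !dev || dev == skb_iif;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, rebind, NULL);
	printf("packet from ifindex 2 accepted: %d\n", iif_matches(2));
	pthread_join(&t, NULL);
	return 0;
}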
@@ -385,7 +387,9 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
struct sctp_transport *t, __u32 pmtu)
{
- if (!t || (t->pathmtu <= pmtu))
+ if (!t ||
+ (t->pathmtu <= pmtu &&
+ t->pl.probe_size + sctp_transport_pl_hlen(t) <= pmtu))
return;
if (sock_owned_by_user(sk)) {
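With path MTU probing, sctp_icmp_frag_needed() now also reacts when the reported MTU undercuts the probe currently in flight, not only when it undercuts the established path MTU. For example, with t->pathmtu already at 1200 and a probe of probe_size 1400 outstanding, an ICMP "fragmentation needed" reporting 1300 used to be ignored (1200 <= 1300); with the extra probe_size + sctp_transport_pl_hlen(t) term (the probe payload plus a few dozen bytes of IP/SCTP and, with UDP encapsulation, UDP headers) the message is processed and the prober can shrink its next probe. The concrete numbers here are illustrative, not taken from the patch.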
@@ -449,7 +453,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
else {
if (!mod_timer(&t->proto_unreach_timer,
jiffies + (HZ/20)))
- sctp_association_hold(asoc);
+ sctp_transport_hold(t);
}
} else {
struct net *net = sock_net(sk);
@@ -458,7 +462,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
"encountered!\n", __func__);
if (del_timer(&t->proto_unreach_timer))
- sctp_association_put(asoc);
+ sctp_transport_put(t);
sctp_do_sm(net, SCTP_EVENT_T_OTHER,
SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
@@ -548,11 +552,56 @@ out:
/* Common cleanup code for icmp/icmpv6 error handler. */
void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
+ __releases(&((__sk)->sk_lock.slock))
{
bh_unlock_sock(sk);
sctp_transport_put(t);
}
+static void sctp_v4_err_handle(struct sctp_transport *t, struct sk_buff *skb,
+ __u8 type, __u8 code, __u32 info)
+{
+ struct sctp_association *asoc = t->asoc;
+ struct sock *sk = asoc->base.sk;
+ int err = 0;
+
+ switch (type) {
+ case ICMP_PARAMETERPROB:
+ err = EPROTO;
+ break;
+ case ICMP_DEST_UNREACH:
+ if (code > NR_ICMP_UNREACH)
+ return;
+ if (code == ICMP_FRAG_NEEDED) {
+ sctp_icmp_frag_needed(sk, asoc, t, SCTP_TRUNC4(info));
+ return;
+ }
+ if (code == ICMP_PROT_UNREACH) {
+ sctp_icmp_proto_unreachable(sk, asoc, t);
+ return;
+ }
+ err = icmp_err_convert[code].errno;
+ break;
+ case ICMP_TIME_EXCEEDED:
+ if (code == ICMP_EXC_FRAGTIME)
+ return;
+
+ err = EHOSTUNREACH;
+ break;
+ case ICMP_REDIRECT:
+ sctp_icmp_redirect(sk, t, skb);
+ return;
+ default:
+ return;
+ }
+ if (!sock_owned_by_user(sk) && inet_sk(sk)->recverr) {
+ sk->sk_err = err;
+ sk_error_report(sk);
+ } else { /* Only an error on timeout */
+ sk->sk_err_soft = err;
+ }
+}
+
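As in the old inline code, the converted errno is delivered immediately only when the application has opted in to error reporting (inet->recverr) and the socket is not currently owned by a user context; otherwise it is parked in sk_err_soft. A short, hypothetical user-space sketch of opting in via IP_RECVERR on an SCTP socket (assuming the usual inet socket option path; the error then surfaces as an errno such as EHOSTUNREACH on a later socket call):

#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
	int one = 1;

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Ask the IP layer to report ICMP-derived errors to this socket. */
	if (setsockopt(fd, IPPROTO_IP, IP_RECVERR, &one, sizeof(one)) < 0)
		perror("setsockopt(IP_RECVERR)");

	/* ... connect()/sendmsg(); an ICMP unreachable for the peer would
	 * then show up on the next call instead of only as a soft error.
	 */
	return 0;
}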
/*
* This routine is called by the ICMP module when it gets some
* sort of error condition. If err < 0 then the socket should
@@ -571,22 +620,19 @@ void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
int sctp_v4_err(struct sk_buff *skb, __u32 info)
{
const struct iphdr *iph = (const struct iphdr *)skb->data;
- const int ihlen = iph->ihl * 4;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
- struct sock *sk;
- struct sctp_association *asoc = NULL;
+ struct net *net = dev_net(skb->dev);
struct sctp_transport *transport;
- struct inet_sock *inet;
+ struct sctp_association *asoc;
__u16 saveip, savesctp;
- int err;
- struct net *net = dev_net(skb->dev);
+ struct sock *sk;
/* Fix up skb to look at the embedded net header. */
saveip = skb->network_header;
savesctp = skb->transport_header;
skb_reset_network_header(skb);
- skb_set_transport_header(skb, ihlen);
+ skb_set_transport_header(skb, iph->ihl * 4);
sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
/* Put back, the original values. */
skb->network_header = saveip;
@@ -595,59 +641,41 @@ int sctp_v4_err(struct sk_buff *skb, __u32 info)
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
return -ENOENT;
}
- /* Warning: The sock lock is held. Remember to call
- * sctp_err_finish!
- */
- switch (type) {
- case ICMP_PARAMETERPROB:
- err = EPROTO;
- break;
- case ICMP_DEST_UNREACH:
- if (code > NR_ICMP_UNREACH)
- goto out_unlock;
+ sctp_v4_err_handle(transport, skb, type, code, info);
+ sctp_err_finish(sk, transport);
- /* PMTU discovery (RFC1191) */
- if (ICMP_FRAG_NEEDED == code) {
- sctp_icmp_frag_needed(sk, asoc, transport,
- SCTP_TRUNC4(info));
- goto out_unlock;
- } else {
- if (ICMP_PROT_UNREACH == code) {
- sctp_icmp_proto_unreachable(sk, asoc,
- transport);
- goto out_unlock;
- }
- }
- err = icmp_err_convert[code].errno;
- break;
- case ICMP_TIME_EXCEEDED:
- /* Ignore any time exceeded errors due to fragment reassembly
- * timeouts.
- */
- if (ICMP_EXC_FRAGTIME == code)
- goto out_unlock;
+ return 0;
+}
- err = EHOSTUNREACH;
- break;
- case ICMP_REDIRECT:
- sctp_icmp_redirect(sk, transport, skb);
- /* Fall through to out_unlock. */
- default:
- goto out_unlock;
+int sctp_udp_v4_err(struct sock *sk, struct sk_buff *skb)
+{
+ struct net *net = dev_net(skb->dev);
+ struct sctp_association *asoc;
+ struct sctp_transport *t;
+ struct icmphdr *hdr;
+ __u32 info = 0;
+
+ skb->transport_header += sizeof(struct udphdr);
+ sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &t);
+ if (!sk) {
+ __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
+ return -ENOENT;
}
- inet = inet_sk(sk);
- if (!sock_owned_by_user(sk) && inet->recverr) {
- sk->sk_err = err;
- sk->sk_error_report(sk);
- } else { /* Only an error on timeout */
- sk->sk_err_soft = err;
+ skb->transport_header -= sizeof(struct udphdr);
+ hdr = (struct icmphdr *)(skb_network_header(skb) - sizeof(struct icmphdr));
+ if (hdr->type == ICMP_REDIRECT) {
+ /* can't be handled without outer iphdr known, leave it to udp_err */
+ sctp_err_finish(sk, t);
+ return 0;
}
+ if (hdr->type == ICMP_DEST_UNREACH && hdr->code == ICMP_FRAG_NEEDED)
+ info = ntohs(hdr->un.frag.mtu);
+ sctp_v4_err_handle(t, skb, hdr->type, hdr->code, info);
-out_unlock:
- sctp_err_finish(sk, transport);
- return 0;
+ sctp_err_finish(sk, t);
+ return 1;
}
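For the UDP-encapsulated path the ICMP error quotes the offending datagram, so after the inner IP header the skb carries the encapsulating UDP header and then the SCTP common header; the transport header offset is advanced past the UDP header only long enough for sctp_err_lookup() to parse the SCTP header, then restored. Redirects are deliberately punted back to the generic UDP error handling because, as the comment notes, the outer IP header is not available here.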
/*
@@ -676,7 +704,7 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);
/* Break out if chunk length is less then minimal. */
- if (ntohs(ch->length) < sizeof(_ch))
+ if (!ch || ntohs(ch->length) < sizeof(_ch))
break;
ch_end = offset + SCTP_PAD4(ntohs(ch->length));
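The added !ch test matters because skb_header_pointer() returns NULL when the requested bytes lie beyond the end of the packet, so a truncated chunk header is now rejected instead of dereferenced. A rough user-space sketch of the same bounds-checked chunk walk over a flat buffer (hypothetical helper and type names, not the kernel ones):

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>

struct chunkhdr {		/* mirrors struct sctp_chunkhdr: type, flags, length */
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;	/* network byte order, includes this header */
};

#define PAD4(len) (((len) + 3) & ~3)

/* Walk the chunks in buf[0..len); return the number of well-formed chunks. */
static int walk_chunks(const uint8_t *buf, size_t len)
{
	size_t offset = 0;
	int n = 0;

	while (offset + sizeof(struct chunkhdr) <= len) {	/* header must fit */
		const struct chunkhdr *ch = (const void *)(buf + offset);
		size_t clen = ntohs(ch->length);

		if (clen < sizeof(*ch))			/* bogus length field */
			break;
		if (offset + PAD4(clen) > len)		/* value runs past the end */
			break;
		n++;
		offset += PAD4(clen);
	}
	return n;
}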
@@ -720,23 +748,21 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
struct sock *sk = ep->base.sk;
struct net *net = sock_net(sk);
struct sctp_hashbucket *head;
- struct sctp_ep_common *epb;
- epb = &ep->base;
- epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
- head = &sctp_ep_hashtable[epb->hashent];
+ ep->hashent = sctp_ep_hashfn(net, ep->base.bind_addr.port);
+ head = &sctp_ep_hashtable[ep->hashent];
if (sk->sk_reuseport) {
bool any = sctp_is_ep_boundall(sk);
- struct sctp_ep_common *epb2;
+ struct sctp_endpoint *ep2;
struct list_head *list;
int cnt = 0, err = 1;
list_for_each(list, &ep->base.bind_addr.address_list)
cnt++;
- sctp_for_each_hentry(epb2, &head->chain) {
- struct sock *sk2 = epb2->sk;
+ sctp_for_each_hentry(ep2, &head->chain) {
+ struct sock *sk2 = ep2->base.sk;
if (!net_eq(sock_net(sk2), net) || sk2 == sk ||
!uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) ||
@@ -763,7 +789,7 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
}
write_lock(&head->lock);
- hlist_add_head(&epb->node, &head->chain);
+ hlist_add_head(&ep->node, &head->chain);
write_unlock(&head->lock);
return 0;
}
@@ -785,19 +811,16 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
struct sock *sk = ep->base.sk;
struct sctp_hashbucket *head;
- struct sctp_ep_common *epb;
-
- epb = &ep->base;
- epb->hashent = sctp_ep_hashfn(sock_net(sk), epb->bind_addr.port);
+ ep->hashent = sctp_ep_hashfn(sock_net(sk), ep->base.bind_addr.port);
- head = &sctp_ep_hashtable[epb->hashent];
+ head = &sctp_ep_hashtable[ep->hashent];
if (rcu_access_pointer(sk->sk_reuseport_cb))
reuseport_detach_sock(sk);
write_lock(&head->lock);
- hlist_del_init(&epb->node);
+ hlist_del_init(&ep->node);
write_unlock(&head->lock);
}
@@ -830,7 +853,6 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
const union sctp_addr *paddr)
{
struct sctp_hashbucket *head;
- struct sctp_ep_common *epb;
struct sctp_endpoint *ep;
struct sock *sk;
__be16 lport;
@@ -840,8 +862,7 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
hash = sctp_ep_hashfn(net, ntohs(lport));
head = &sctp_ep_hashtable[hash];
read_lock(&head->lock);
- sctp_for_each_hentry(epb, &head->chain) {
- ep = sctp_ep(epb);
+ sctp_for_each_hentry(ep, &head->chain) {
if (sctp_endpoint_is_match(ep, net, laddr))
goto hit;
}
@@ -1130,7 +1151,8 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
if (!af)
continue;
- af->from_addr_param(paddr, params.addr, sh->source, 0);
+ if (!af->from_addr_param(paddr, params.addr, sh->source, 0))
+ continue;
asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
if (asoc)
@@ -1166,6 +1188,9 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
union sctp_addr_param *param;
union sctp_addr paddr;
+ if (ntohs(ch->length) < sizeof(*asconf) + sizeof(struct sctp_paramhdr))
+ return NULL;
+
/* Skip over the ADDIP header and find the Address parameter */
param = (union sctp_addr_param *)(asconf + 1);
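The new length check rejects truncated ASCONF chunks before the address parameter is touched: assuming the usual layouts, sizeof(*asconf) covers the 4-byte chunk header plus the 4-byte ADDIP serial number, and sizeof(struct sctp_paramhdr) adds another 4 bytes, so anything shorter than roughly 12 bytes is dropped here rather than parsed past the end of the chunk.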
@@ -1173,7 +1198,8 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
if (unlikely(!af))
return NULL;
- af->from_addr_param(&paddr, param, peer_port, 0);
+ if (!af->from_addr_param(&paddr, param, peer_port, 0))
+ return NULL;
return __sctp_lookup_association(net, laddr, &paddr, transportp);
}
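Both lookup paths now honour the return value of af->from_addr_param(): the helper can fail on an address parameter that is too short or of the wrong type, and on failure the INIT scan skips that parameter and the ASCONF lookup bails out rather than proceeding with whatever happened to be in the local paddr variable.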
@@ -1235,6 +1261,7 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
net, ch, laddr,
sctp_hdr(skb)->source,
transportp);
+ break;
default:
break;
}
@@ -1244,7 +1271,7 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
ch = (struct sctp_chunkhdr *)ch_end;
chunk_num++;
- } while (ch_end < skb_tail_pointer(skb));
+ } while (ch_end + sizeof(*ch) < skb_tail_pointer(skb));
return asoc;
}
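The tightened loop condition in __sctp_rcv_walk_lookup() (continuing only while another struct sctp_chunkhdr still fits before the skb tail) ensures the ch->length read at the top of the next iteration cannot run past the end of the packet data.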