Diffstat (limited to 'net/sctp/socket.c')
-rw-r--r--  net/sctp/socket.c  120
1 file changed, 89 insertions, 31 deletions
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 3a8318e518f1..1db478e34520 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -103,7 +103,7 @@ static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *, struct sock *,
struct sctp_association *, sctp_socket_type_t);
-static int sctp_memory_pressure;
+static unsigned long sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;
@@ -164,7 +164,7 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
sizeof(struct sk_buff) +
sizeof(struct sctp_chunk);
- atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+ refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
sk->sk_wmem_queued += chunk->skb->truesize;
sk_mem_charge(sk, chunk->skb->truesize);
}
@@ -1494,7 +1494,7 @@ static void sctp_close(struct sock *sk, long timeout)
pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
- lock_sock(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk->sk_shutdown = SHUTDOWN_MASK;
sk->sk_state = SCTP_SS_CLOSING;
@@ -1544,7 +1544,7 @@ static void sctp_close(struct sock *sk, long timeout)
* held and that should be grabbed before socket lock.
*/
spin_lock_bh(&net->sctp.addr_wq_lock);
- bh_lock_sock(sk);
+ bh_lock_sock_nested(sk);
/* Hold the sock, since sk_common_release() will put sock_put()
* and we have just a little more cleanup.
@@ -1920,7 +1920,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
}
/* Check for invalid stream. */
- if (sinfo->sinfo_stream >= asoc->stream->outcnt) {
+ if (sinfo->sinfo_stream >= asoc->stream.outcnt) {
err = -EINVAL;
goto out_free;
}
@@ -4497,8 +4497,8 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
info->sctpi_rwnd = asoc->a_rwnd;
info->sctpi_unackdata = asoc->unack_data;
info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
- info->sctpi_instrms = asoc->stream->incnt;
- info->sctpi_outstrms = asoc->stream->outcnt;
+ info->sctpi_instrms = asoc->stream.incnt;
+ info->sctpi_outstrms = asoc->stream.outcnt;
list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
info->sctpi_inqueue++;
list_for_each(pos, &asoc->outqueue.out_chunk_list)
@@ -4726,8 +4726,8 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
status.sstat_unackdata = asoc->unack_data;
status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
- status.sstat_instrms = asoc->stream->incnt;
- status.sstat_outstrms = asoc->stream->outcnt;
+ status.sstat_instrms = asoc->stream.incnt;
+ status.sstat_outstrms = asoc->stream.outcnt;
status.sstat_fragmentation_point = asoc->frag_point;
status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
@@ -4933,11 +4933,47 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
}
EXPORT_SYMBOL(sctp_do_peeloff);
+static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *peeloff,
+ struct file **newfile, unsigned flags)
+{
+ struct socket *newsock;
+ int retval;
+
+ retval = sctp_do_peeloff(sk, peeloff->associd, &newsock);
+ if (retval < 0)
+ goto out;
+
+ /* Map the socket to an unused fd that can be returned to the user. */
+ retval = get_unused_fd_flags(flags & SOCK_CLOEXEC);
+ if (retval < 0) {
+ sock_release(newsock);
+ goto out;
+ }
+
+ *newfile = sock_alloc_file(newsock, 0, NULL);
+ if (IS_ERR(*newfile)) {
+ put_unused_fd(retval);
+ sock_release(newsock);
+ retval = PTR_ERR(*newfile);
+ *newfile = NULL;
+ return retval;
+ }
+
+ pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk,
+ retval);
+
+ peeloff->sd = retval;
+
+ if (flags & SOCK_NONBLOCK)
+ (*newfile)->f_flags |= O_NONBLOCK;
+out:
+ return retval;
+}
+
static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
sctp_peeloff_arg_t peeloff;
- struct socket *newsock;
- struct file *newfile;
+ struct file *newfile = NULL;
int retval = 0;
if (len < sizeof(sctp_peeloff_arg_t))
@@ -4946,26 +4982,44 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval
if (copy_from_user(&peeloff, optval, len))
return -EFAULT;
- retval = sctp_do_peeloff(sk, peeloff.associd, &newsock);
+ retval = sctp_getsockopt_peeloff_common(sk, &peeloff, &newfile, 0);
if (retval < 0)
goto out;
- /* Map the socket to an unused fd that can be returned to the user. */
- retval = get_unused_fd_flags(0);
- if (retval < 0) {
- sock_release(newsock);
- goto out;
+ /* Return the fd mapped to the new socket. */
+ if (put_user(len, optlen)) {
+ fput(newfile);
+ put_unused_fd(retval);
+ return -EFAULT;
}
- newfile = sock_alloc_file(newsock, 0, NULL);
- if (IS_ERR(newfile)) {
+ if (copy_to_user(optval, &peeloff, len)) {
+ fput(newfile);
put_unused_fd(retval);
- sock_release(newsock);
- return PTR_ERR(newfile);
+ return -EFAULT;
}
+ fd_install(retval, newfile);
+out:
+ return retval;
+}
- pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk,
- retval);
+static int sctp_getsockopt_peeloff_flags(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ sctp_peeloff_flags_arg_t peeloff;
+ struct file *newfile = NULL;
+ int retval = 0;
+
+ if (len < sizeof(sctp_peeloff_flags_arg_t))
+ return -EINVAL;
+ len = sizeof(sctp_peeloff_flags_arg_t);
+ if (copy_from_user(&peeloff, optval, len))
+ return -EFAULT;
+
+ retval = sctp_getsockopt_peeloff_common(sk, &peeloff.p_arg,
+ &newfile, peeloff.flags);
+ if (retval < 0)
+ goto out;
/* Return the fd mapped to the new socket. */
if (put_user(len, optlen)) {
@@ -4973,7 +5027,7 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval
put_unused_fd(retval);
return -EFAULT;
}
- peeloff.sd = retval;
+
if (copy_to_user(optval, &peeloff, len)) {
fput(newfile);
put_unused_fd(retval);
@@ -6033,7 +6087,8 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
return -EACCES;
hmacs = ep->auth_hmacs_list;
- data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
+ data_len = ntohs(hmacs->param_hdr.length) -
+ sizeof(struct sctp_paramhdr);
if (len < sizeof(struct sctp_hmacalgo) + data_len)
return -EINVAL;
@@ -6117,7 +6172,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
goto num;
/* See if the user provided enough room for all the data */
- num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
+ num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
if (len < num_chunks)
return -EINVAL;
@@ -6165,7 +6220,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
if (!ch)
goto num;
- num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
+ num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
if (len < sizeof(struct sctp_authchunks) + num_chunks)
return -EINVAL;
@@ -6599,10 +6654,10 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
goto out;
asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
- if (!asoc || params.sprstat_sid >= asoc->stream->outcnt)
+ if (!asoc || params.sprstat_sid >= asoc->stream.outcnt)
goto out;
- streamout = &asoc->stream->out[params.sprstat_sid];
+ streamout = &asoc->stream.out[params.sprstat_sid];
if (policy == SCTP_PR_SCTP_NONE) {
params.sprstat_abandoned_unsent = 0;
params.sprstat_abandoned_sent = 0;
@@ -6758,6 +6813,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
case SCTP_SOCKOPT_PEELOFF:
retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
break;
+ case SCTP_SOCKOPT_PEELOFF_FLAGS:
+ retval = sctp_getsockopt_peeloff_flags(sk, len, optval, optlen);
+ break;
case SCTP_PEER_ADDR_PARAMS:
retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
optlen);
@@ -7563,7 +7621,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
if (flags & MSG_PEEK) {
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
} else {
skb = __skb_dequeue(&sk->sk_receive_queue);
}
@@ -7684,7 +7742,7 @@ static void sctp_wfree(struct sk_buff *skb)
sizeof(struct sk_buff) +
sizeof(struct sctp_chunk);
- atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+ WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc));
/*
* This undoes what is done via sctp_set_owner_w and sk_mem_charge
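For context on the new SCTP_SOCKOPT_PEELOFF_FLAGS handling added above: it is driven from userspace through getsockopt(), with the flags field allowing SOCK_NONBLOCK and SOCK_CLOEXEC to be applied to the peeled-off descriptor in one call. Below is a minimal, hypothetical userspace sketch, assuming uapi headers that ship the sctp_peeloff_flags_arg_t layout introduced alongside this change (p_arg holding associd/sd, plus flags); the helper name and header availability are assumptions, not part of this patch.

/*
 * Hypothetical sketch: peel off one SCTP association into its own fd,
 * with SOCK_NONBLOCK|SOCK_CLOEXEC applied atomically by the kernel via
 * SCTP_SOCKOPT_PEELOFF_FLAGS.  Assumes headers exposing
 * sctp_peeloff_flags_arg_t and the new option value.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int sctp_peeloff_with_flags(int sctp_fd, sctp_assoc_t assoc_id)
{
	sctp_peeloff_flags_arg_t arg;
	socklen_t len = sizeof(arg);

	memset(&arg, 0, sizeof(arg));
	arg.p_arg.associd = assoc_id;              /* association to peel off */
	arg.flags = SOCK_NONBLOCK | SOCK_CLOEXEC;  /* honored by the new kernel path */

	if (getsockopt(sctp_fd, IPPROTO_SCTP, SCTP_SOCKOPT_PEELOFF_FLAGS,
		       &arg, &len) < 0) {
		perror("SCTP_SOCKOPT_PEELOFF_FLAGS");
		return -1;
	}

	return arg.p_arg.sd;                       /* fd of the peeled-off socket */
}

On kernels or headers without this change, only the original SCTP_SOCKOPT_PEELOFF option exists, so the descriptor flags would instead have to be set afterwards with fcntl(); the flags variant avoids that extra step.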