author     Paolo Abeni <pabeni@redhat.com>        2020-02-28 14:45:21 +0100
committer  David S. Miller <davem@davemloft.net>  2020-02-28 12:12:53 -0800
commit     7782040b950b5d0433f734fb2bba8b8b5ed6ce5a
tree       70d2549b3467bf6ec5b3e96d7d679e111d1b984c /net/unix
parent     af_unix: Replace zero-length array with flexible-array member
unix: uses an atomic type for scm files accounting
So the scm_stat_{add,del} helpers can be invoked with no additional lock held. This cleans up the code a bit and will make the next patch easier.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
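The gist of the change: the nr_fds counter in struct scm_stat moves from a plain integer guarded by sk_receive_queue.lock to an atomic_t, so the accounting helpers and the fdinfo read side (READ_ONCE() becomes atomic_read()) no longer need any shared lock. Below is a minimal userspace sketch of the same pattern, using C11 atomics in place of the kernel's atomic_t API; the struct and field names mirror the patch, everything else is illustrative rather than kernel code:

#include <stdatomic.h>
#include <stdio.h>

/* Mirrors struct scm_stat after the patch: a counter needing no lock. */
struct scm_stat {
        atomic_int nr_fds;
};

/* Analogue of scm_stat_add(): callers need not hold any queue lock. */
static void stat_add(struct scm_stat *st, int count)
{
        if (count)
                atomic_fetch_add(&st->nr_fds, count);
}

/* Analogue of scm_stat_del(). */
static void stat_del(struct scm_stat *st, int count)
{
        if (count)
                atomic_fetch_sub(&st->nr_fds, count);
}

int main(void)
{
        struct scm_stat st = { .nr_fds = 0 };

        stat_add(&st, 3);  /* an skb carrying three SCM_RIGHTS fds is queued */
        stat_del(&st, 3);  /* ...and later dequeued */
        printf("scm_fds: %d\n", atomic_load(&st.nr_fds));  /* prints 0 */
        return 0;
}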
Diffstat (limited to 'net/unix')
-rw-r--r--  net/unix/af_unix.c | 21
1 file changed, 6 insertions(+), 15 deletions(-)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 9d0518d9bdd4..c46fa271fc4a 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -690,7 +690,8 @@ static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
 
         if (sk) {
                 u = unix_sk(sock->sk);
-                seq_printf(m, "scm_fds: %u\n", READ_ONCE(u->scm_stat.nr_fds));
+                seq_printf(m, "scm_fds: %u\n",
+                           atomic_read(&u->scm_stat.nr_fds));
         }
 }
 #else
@@ -1602,10 +1603,8 @@ static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
         struct scm_fp_list *fp = UNIXCB(skb).fp;
         struct unix_sock *u = unix_sk(sk);
 
-        lockdep_assert_held(&sk->sk_receive_queue.lock);
-
         if (unlikely(fp && fp->count))
-                u->scm_stat.nr_fds += fp->count;
+                atomic_add(fp->count, &u->scm_stat.nr_fds);
 }
 
 static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
@@ -1613,10 +1612,8 @@ static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
         struct scm_fp_list *fp = UNIXCB(skb).fp;
         struct unix_sock *u = unix_sk(sk);
 
-        lockdep_assert_held(&sk->sk_receive_queue.lock);
-
         if (unlikely(fp && fp->count))
-                u->scm_stat.nr_fds -= fp->count;
+                atomic_sub(fp->count, &u->scm_stat.nr_fds);
 }
 
 /*
@@ -1805,10 +1802,8 @@ restart_locked:
         if (sock_flag(other, SOCK_RCVTSTAMP))
                 __net_timestamp(skb);
         maybe_add_creds(skb, sock, other);
-        spin_lock(&other->sk_receive_queue.lock);
         scm_stat_add(other, skb);
-        __skb_queue_tail(&other->sk_receive_queue, skb);
-        spin_unlock(&other->sk_receive_queue.lock);
+        skb_queue_tail(&other->sk_receive_queue, skb);
         unix_state_unlock(other);
         other->sk_data_ready(other);
         sock_put(other);
@@ -1910,10 +1905,8 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
                         goto pipe_err_free;
 
                 maybe_add_creds(skb, sock, other);
-                spin_lock(&other->sk_receive_queue.lock);
                 scm_stat_add(other, skb);
-                __skb_queue_tail(&other->sk_receive_queue, skb);
-                spin_unlock(&other->sk_receive_queue.lock);
+                skb_queue_tail(&other->sk_receive_queue, skb);
                 unix_state_unlock(other);
                 other->sk_data_ready(other);
                 sent += size;
@@ -2409,9 +2402,7 @@ unlock:
                         sk_peek_offset_bwd(sk, chunk);
 
                         if (UNIXCB(skb).fp) {
-                                spin_lock(&sk->sk_receive_queue.lock);
                                 scm_stat_del(sk, skb);
-                                spin_unlock(&sk->sk_receive_queue.lock);
                                 unix_detach_fds(&scm, skb);
                         }
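A note on the queueing change visible in the last three hunks: skb_queue_tail() is the locking counterpart of __skb_queue_tail(); it takes the queue's spinlock internally. Once scm_stat_add()/scm_stat_del() stopped requiring sk_receive_queue.lock, the open-coded spin_lock/__skb_queue_tail/spin_unlock sequences collapse into a single skb_queue_tail() call. Here is a toy sketch of that locked-vs-unlocked helper convention, with a pthread mutex standing in for the spinlock (all names are illustrative, not kernel code):

#include <pthread.h>
#include <stdio.h>

/* Toy receive queue: __enqueue() assumes the caller holds the lock,
 * enqueue() takes it internally -- the same split as
 * __skb_queue_tail() vs skb_queue_tail(). */
struct queue {
        pthread_mutex_t lock;
        int len;
};

static void __enqueue(struct queue *q)
{
        q->len++;  /* caller must hold q->lock */
}

static void enqueue(struct queue *q)
{
        pthread_mutex_lock(&q->lock);
        __enqueue(q);
        pthread_mutex_unlock(&q->lock);
}

int main(void)
{
        struct queue q = { PTHREAD_MUTEX_INITIALIZER, 0 };

        /* Before the patch the send paths open-coded the locking because
         * the accounting update had to sit inside the critical section;
         * with atomic accounting the plain locking variant suffices. */
        enqueue(&q);
        printf("len: %d\n", q.len);  /* prints 1; build with -pthread */
        return 0;
}

One consequence worth noting: the counter is now updated outside the queue lock, so a reader of scm_fds may momentarily see a value slightly out of step with the queue contents; for a statistic exposed via fdinfo that is an acceptable trade.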