author     Hannes Frederic Sowa <hannes@stressinduktion.org>   2016-04-05 17:10:14 +0200
committer  David S. Miller <davem@davemloft.net>                2016-04-07 16:44:14 -0400
commit     61881cfb5ad80c1d0a46ca6d08b7e271892b2ff6 (patch)
tree       122cdc70b2d17ec6027143c442bfe0a26ce2b61f
parent     tcp/dccp: fix inet_reuseport_add_sock() (diff)
download   linux-dev-61881cfb5ad80c1d0a46ca6d08b7e271892b2ff6.tar.xz
           linux-dev-61881cfb5ad80c1d0a46ca6d08b7e271892b2ff6.zip
sock: fix lockdep annotation in release_sock
During release_sock() we use callbacks to finish the processing of
outstanding skbs on the socket. At that point we still own the socket
(sk->sk_lock.owned == 1), but we have already told lockdep that the
mutex is released. This can lead to false positives from
lockdep_sock_is_held(), because we do not hold the slock spinlock
while processing the outstanding skbs.

I took over this patch from Eric Dumazet and tested it.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
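For context, here is a minimal sketch of the check that produced the false positives, assuming the 4.6-era definition of lockdep_sock_is_held(); treat it as illustrative rather than a quote from this tree:

static inline bool lockdep_sock_is_held(const struct sock *csk)
{
	struct sock *sk = (struct sock *)csk;

	/* "Held" means lockdep currently sees either the sk_lock
	 * pseudo-mutex or its protecting slock spinlock as acquired.
	 */
	return lockdep_is_held(&sk->sk_lock) ||
	       lockdep_is_held(&sk->sk_lock.slock);
}

Before this patch, release_sock() issued mutex_release() first and only then drained the backlog. Since __release_sock() drops and re-takes sk_lock.slock around each sk_backlog_rcv() callback, lockdep saw neither lock as held during a callback even though sk->sk_lock.owned was still 1, so the check above could fail spuriously.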
-rw-r--r--   include/net/sock.h | 7 ++++++-
-rw-r--r--   net/core/sock.c    | 5 -----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 1decb7a22261..91cee51086dc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1333,7 +1333,12 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
 
 static inline void sock_release_ownership(struct sock *sk)
 {
-	sk->sk_lock.owned = 0;
+	if (sk->sk_lock.owned) {
+		sk->sk_lock.owned = 0;
+
+		/* The sk_lock has mutex_unlock() semantics: */
+		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+	}
 }
 
 /*
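For contrast, the acquire side is untouched by this patch. Roughly what lock_sock_nested() looked like in the same era's net/core/sock.c (a sketch, so treat the exact body as an assumption): it marks the socket owned under the slock and only then tells lockdep the pseudo-mutex was taken:

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);	/* sleep until the current owner lets go */
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/* The sk_lock has mutex_lock() semantics: */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}

The hunk below mirrors this ordering on the release side: ownership and the lockdep annotation are now dropped together, under the slock.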
diff --git a/net/core/sock.c b/net/core/sock.c
index 2ce76e82857f..152274d188ef 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2483,11 +2483,6 @@ EXPORT_SYMBOL(lock_sock_nested);
 
 void release_sock(struct sock *sk)
 {
-	/*
-	 * The sk_lock has mutex_unlock() semantics:
-	 */
-	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-
 	spin_lock_bh(&sk->sk_lock.slock);
 	if (sk->sk_backlog.tail)
 		__release_sock(sk);
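With both hunks applied, the annotation moves into sock_release_ownership(), which release_sock() calls inside the slock critical section once the backlog has been drained. A rough sketch of the resulting flow follows; the lines beyond those shown in the hunk above are an assumption based on the 4.6-era net/core/sock.c, not part of this diff:

void release_sock(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);	/* backlog callbacks run while the
					 * mutex annotation is still held */

	if (sk->sk_prot->release_cb)
		sk->sk_prot->release_cb(sk);

	sock_release_ownership(sk);	/* owned = 0 + mutex_release() */
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}

The new if (sk->sk_lock.owned) guard in sock_release_ownership() also makes the annotation conditional, so a caller that never owned the socket cannot report a release that was never paired with an acquire.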