author	Ingo Molnar <mingo@elte.hu>	2006-07-03 00:25:12 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-07-03 15:27:07 -0700
commit	da21f24dd73954c2ed0cd39a698e2c9916c05d71 (patch)
tree	22829e3791f0602d284b8f5398526b228faa9501
parent	[PATCH] lockdep: annotate hrtimer base locks (diff)
[PATCH] lockdep: annotate sock_lock_init()
Teach special (multi-initialized, per-address-family) locking code to the lock validator. Has no effect on non-lockdep kernels.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	include/net/sock.h	4
-rw-r--r--	net/core/sock.c	16
2 files changed, 20 insertions, 0 deletions
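
The change is small but the idea is worth spelling out: locks that are all initialized through one common helper end up in a single lockdep class, so address families with different locking rules would otherwise be conflated and could trigger false reports. Giving each family its own lock_class_key and re-keying the lock right after spin_lock_init() splits them into separate classes. A minimal kernel-style sketch of that pattern, not part of the patch itself (my_family_keys, my_sock and my_sock_lock_init are illustrative names):

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/socket.h>	/* AF_MAX */

/* One lockdep key per address family, mirroring af_family_keys[] in the patch below. */
static struct lock_class_key my_family_keys[AF_MAX];

struct my_sock {
	spinlock_t	slock;
	unsigned short	family;		/* e.g. AF_INET, AF_UNIX */
};

static void my_sock_lock_init(struct my_sock *s)
{
	spin_lock_init(&s->slock);
	/*
	 * Re-key the lock: all sockets of one family share a class,
	 * different families are kept apart.  Has no effect on
	 * non-lockdep kernels, as the commit message notes.
	 */
	lockdep_set_class(&s->slock, &my_family_keys[s->family]);
}
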
diff --git a/include/net/sock.h b/include/net/sock.h
index 7b3d6b856946..83805feea880 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -80,8 +80,12 @@ typedef struct {
wait_queue_head_t wq;
} socket_lock_t;
+extern struct lock_class_key af_family_keys[AF_MAX];
+
#define sock_lock_init(__sk) \
do { spin_lock_init(&((__sk)->sk_lock.slock)); \
+ lockdep_set_class(&(__sk)->sk_lock.slock, \
+ af_family_keys + (__sk)->sk_family); \
(__sk)->sk_lock.owner = NULL; \
init_waitqueue_head(&((__sk)->sk_lock.wq)); \
} while(0)
diff --git a/net/core/sock.c b/net/core/sock.c
index 533b9317144b..0b4d5d25b23c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -129,6 +129,18 @@
#include <net/tcp.h>
#endif
+/*
+ * Each address family might have different locking rules, so we have
+ * one slock key per address family:
+ */
+struct lock_class_key af_family_keys[AF_MAX];
+
+/*
+ * sk_callback_lock locking rules are per-address-family,
+ * so split the lock classes by using a per-AF key:
+ */
+static struct lock_class_key af_callback_keys[AF_MAX];
+
/* Take into consideration the size of the struct sk_buff overhead in the
* determination of these values, since that is non-constant across
* platforms. This makes socket queueing behavior and performance
@@ -848,6 +860,8 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
rwlock_init(&newsk->sk_dst_lock);
rwlock_init(&newsk->sk_callback_lock);
+ lockdep_set_class(&newsk->sk_callback_lock,
+ af_callback_keys + newsk->sk_family);
newsk->sk_dst_cache = NULL;
newsk->sk_wmem_queued = 0;
@@ -1422,6 +1436,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
rwlock_init(&sk->sk_dst_lock);
rwlock_init(&sk->sk_callback_lock);
+ lockdep_set_class(&sk->sk_callback_lock,
+ af_callback_keys + sk->sk_family);
sk->sk_state_change = sock_def_wakeup;
sk->sk_data_ready = sock_def_readable;
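
The second key array covers sk_callback_lock, which gets the same per-family re-keying in both sk_clone() and sock_init_data(), only on an rwlock rather than a spinlock. Note the design split: af_callback_keys can stay static to net/core/sock.c, while af_family_keys must be extern because the sock_lock_init() macro in the header references it. A short sketch of the rwlock variant under the same assumptions and headers as above (my_callback_keys and my_callback_lock_init are illustrative names):

static struct lock_class_key my_callback_keys[AF_MAX];

static void my_callback_lock_init(rwlock_t *lock, unsigned short family)
{
	rwlock_init(lock);
	/* Same per-address-family idea, applied to a reader-writer lock. */
	lockdep_set_class(lock, &my_callback_keys[family]);
}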