author		Eric Dumazet <dada1@cosmosbay.com>	2008-11-20 20:39:09 -0800
committer	David S. Miller <davem@davemloft.net>	2008-11-20 20:39:09 -0800
commit		9db66bdcc83749affe61c61eb8ff3cf08f42afec
tree		81bb20e4f569d3b44731498428277db9d77fa7a9	/net/ipv4/inet_hashtables.c
parent		ipgre: convert to netdevice_ops
net: convert TCP/DCCP ehash rwlocks to spinlocks
Now that TCP & DCCP use RCU lookups, we can convert the ehash rwlocks to spinlocks.

/proc/net/tcp and the other seq_file 'readers' can safely be converted to 'writers'.

This should speed up writers, since spin_lock()/spin_unlock() use only one atomic operation instead of the two needed by write_lock()/write_unlock().

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
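To make the locking change concrete, here is a rough sketch of the pattern the patch relies on: lookups walk a nulls chain under rcu_read_lock() and never touch the bucket lock, so the lock only has to serialize writers and a plain spinlock is enough. This sketch is illustrative only and not part of the patch; 'my_bucket', 'my_entry', my_lookup() and my_insert() are invented names, while the spinlock, RCU and hlist_nulls primitives are the kernel APIs the real ehash code uses.

/*
 * Illustrative only -- not part of the patch.  The structure and function
 * names are made up for the example.
 */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/rculist_nulls.h>

struct my_entry {
	struct hlist_nulls_node	node;
	u32			key;
};

struct my_bucket {
	struct hlist_nulls_head	chain;	/* walked by RCU readers */
	spinlock_t		lock;	/* serializes writers only */
};

/* Reader: no bucket lock at all, just rcu_read_lock(). */
static struct my_entry *my_lookup(struct my_bucket *b, u32 key)
{
	struct my_entry *e;
	const struct hlist_nulls_node *node;

	rcu_read_lock();
	hlist_nulls_for_each_entry_rcu(e, node, &b->chain, node)
		if (e->key == key)
			goto found;
	e = NULL;
found:
	/* Real code also rechecks the nulls value and takes a reference
	 * on the entry before rcu_read_unlock(); omitted here. */
	rcu_read_unlock();
	return e;
}

/* Writer: readers never take the lock, so a plain spinlock suffices. */
static void my_insert(struct my_bucket *b, struct my_entry *e)
{
	spin_lock(&b->lock);
	hlist_nulls_add_head_rcu(&e->node, &b->chain);
	spin_unlock(&b->lock);
}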
Diffstat (limited to 'net/ipv4/inet_hashtables.c')
-rw-r--r--	net/ipv4/inet_hashtables.c	21
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 377d004e5723..4c273a9981a6 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -271,13 +271,12 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
 	struct net *net = sock_net(sk);
 	unsigned int hash = inet_ehashfn(net, daddr, lport, saddr, inet->dport);
 	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
-	rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
+	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
 	struct sock *sk2;
 	const struct hlist_nulls_node *node;
 	struct inet_timewait_sock *tw;
 
-	prefetch(head->chain.first);
-	write_lock(lock);
+	spin_lock(lock);
 
 	/* Check TIME-WAIT sockets first. */
 	sk_nulls_for_each(sk2, node, &head->twchain) {
@@ -308,8 +307,8 @@ unique:
 	sk->sk_hash = hash;
 	WARN_ON(!sk_unhashed(sk));
 	__sk_nulls_add_node_rcu(sk, &head->chain);
+	spin_unlock(lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-	write_unlock(lock);
 
 	if (twp) {
 		*twp = tw;
@@ -325,7 +324,7 @@ unique:
 	return 0;
 
 not_unique:
-	write_unlock(lock);
+	spin_unlock(lock);
 	return -EADDRNOTAVAIL;
 }
 
@@ -340,7 +339,7 @@ void __inet_hash_nolisten(struct sock *sk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
 	struct hlist_nulls_head *list;
-	rwlock_t *lock;
+	spinlock_t *lock;
 	struct inet_ehash_bucket *head;
 
 	WARN_ON(!sk_unhashed(sk));
@@ -350,10 +349,10 @@ void __inet_hash_nolisten(struct sock *sk)
 	list = &head->chain;
 	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 
-	write_lock(lock);
+	spin_lock(lock);
 	__sk_nulls_add_node_rcu(sk, list);
+	spin_unlock(lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-	write_unlock(lock);
 }
 EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
 
@@ -402,12 +401,12 @@ void inet_unhash(struct sock *sk)
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 		spin_unlock_bh(&ilb->lock);
 	} else {
-		rwlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
+		spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 
-		write_lock_bh(lock);
+		spin_lock_bh(lock);
 		if (__sk_nulls_del_node_init_rcu(sk))
 			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
-		write_unlock_bh(lock);
+		spin_unlock_bh(lock);
 	}
 }
 EXPORT_SYMBOL_GPL(inet_unhash);