path: root/net/ipv4/udp.c
author		Eric Dumazet <eric.dumazet@gmail.com>	2010-05-26 19:20:18 +0000
committer	David S. Miller <davem@davemloft.net>	2010-05-27 00:30:53 -0700
commit		8a74ad60a546b13bd1096b2a61a7a5c6fd9ae17c
tree		3110e7e59883597b5d0f617e8507e15b8f965f3f /net/ipv4/udp.c
parent		net/iucv: Add missing spin_unlock
net: fix lock_sock_bh/unlock_sock_bh
This new sock lock primitive was introduced to speed up some user-context socket manipulation. But it is unsafe when protecting two threads, one using regular lock_sock/release_sock and the other using lock_sock_bh/unlock_sock_bh.

This patch changes lock_sock_bh to check the 'owned' state: if owned is found to be set, we must take the slow path. lock_sock_bh() now returns a boolean saying whether the slow path was taken, and this boolean is used at unlock_sock_bh time to call the appropriate unlock function.

After this change, BH may be either disabled or enabled during the protected section, so the old names would be misleading; we rename these functions to lock_sock_fast()/unlock_sock_fast().

Reported-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Tested-by: Anton Blanchard <anton@samba.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
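For context, the two primitives this diff adopts are defined elsewhere in the same patch: lock_sock_fast() in net/core/sock.c and unlock_sock_fast() in include/net/sock.h. The sketch below omits the lockdep annotation in the slow path, so treat it as an illustration of the semantics rather than the verbatim patched code:

/*
 * Grab the spinlock with BH disabled. If no process context owns
 * the socket, stay on the fast path and return false; BH remain
 * disabled until unlock. Otherwise fall back to the slow path:
 * take ownership the way lock_sock() does, re-enable BH, and
 * return true.
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/* fast path: caller runs with slock held, BH disabled */
		return false;

	/* slow path: a process context owns the socket */
	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	local_bh_enable();
	return true;
}

/* The returned boolean picks the matching release at unlock time. */
static inline void unlock_sock_fast(struct sock *sk, bool slow)
{
	if (slow)
		release_sock(sk);
	else
		spin_unlock_bh(&sk->sk_lock.slock);
}

Every caller converted in this file follows the same pattern: save the boolean returned by lock_sock_fast() and pass it back to unlock_sock_fast().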
Diffstat (limited to 'net/ipv4/udp.c')
-rw-r--r--	net/ipv4/udp.c	14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9de6a698f91d..b9d0d409516f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1063,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk)
 	spin_unlock_bh(&rcvq->lock);
 
 	if (!skb_queue_empty(&list_kill)) {
-		lock_sock_bh(sk);
+		bool slow = lock_sock_fast(sk);
+
 		__skb_queue_purge(&list_kill);
 		sk_mem_reclaim_partial(sk);
-		unlock_sock_bh(sk);
+		unlock_sock_fast(sk, slow);
 	}
 	return res;
 }
@@ -1123,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	int peeked;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
+	bool slow;
 
 	/*
 	 *	Check any passed addresses
@@ -1197,10 +1199,10 @@ out:
 	return err;
 
 csum_copy_err:
-	lock_sock_bh(sk);
+	slow = lock_sock_fast(sk);
 	if (!skb_kill_datagram(sk, skb, flags))
 		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 
 	if (noblock)
 		return -EAGAIN;
@@ -1625,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
-	lock_sock_bh(sk);
+	bool slow = lock_sock_fast(sk);
 	udp_flush_pending_frames(sk);
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 }
 
 /*