path: root/net/ipv4/udp.c
author     David S. Miller <davem@davemloft.net>  2019-10-28 13:33:41 -0700
committer  David S. Miller <davem@davemloft.net>  2019-10-28 13:33:41 -0700
commit     2024305863d626bd44664c284fd609b6a56bb9ed (patch)
tree       961563fa2222cd852303abc9eb321a95fed5d774 /net/ipv4/udp.c
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf (diff)
parent     net: add READ_ONCE() annotation in __skb_wait_for_more_packets() (diff)
Merge branch 'net-avoid-KCSAN-splats'
Eric Dumazet says:

====================
net: avoid KCSAN splats

Often times we use skb_queue_empty() without holding a lock, meaning that other CPUs (or interrupts) can change the queue under us. This is fine, but we need to properly annotate the lockless intent to make sure the compiler won't over-optimize things.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
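For reference, the conversions below rely on skb_queue_empty_lockless(), which wraps the head-pointer load in READ_ONCE() so the compiler cannot tear, cache, or re-fetch it behind the reader's back. A minimal sketch of such a helper, assuming the usual circular sk_buff_head layout where an empty list's next pointer refers back to the list head (illustrative, not necessarily the exact upstream definition):

/* Lockless emptiness check on a circular sk_buff_head.
 * READ_ONCE() annotates the racy load for the compiler and for KCSAN;
 * writers still modify the queue under their own lock.
 */
static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
{
        return READ_ONCE(list->next) == (const struct sk_buff *)list;
}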
Diffstat (limited to 'net/ipv4/udp.c')
-rw-r--r--  net/ipv4/udp.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 14bc654b6842..345a3d43f5a6 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1577,7 +1577,7 @@ static int first_packet_length(struct sock *sk)
         spin_lock_bh(&rcvq->lock);
         skb = __first_packet_length(sk, rcvq, &total);
-        if (!skb && !skb_queue_empty(sk_queue)) {
+        if (!skb && !skb_queue_empty_lockless(sk_queue)) {
                 spin_lock(&sk_queue->lock);
                 skb_queue_splice_tail_init(sk_queue, rcvq);
                 spin_unlock(&sk_queue->lock);
@@ -1650,7 +1650,7 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
                                 return skb;
                         }
-                        if (skb_queue_empty(sk_queue)) {
+                        if (skb_queue_empty_lockless(sk_queue)) {
                                 spin_unlock_bh(&queue->lock);
                                 goto busy_check;
                         }
@@ -1676,7 +1676,7 @@ busy_check:
                                 break;
                         sk_busy_loop(sk, flags & MSG_DONTWAIT);
-                } while (!skb_queue_empty(sk_queue));
+                } while (!skb_queue_empty_lockless(sk_queue));
                 /* sk_queue is empty, reader_queue may contain peeked packets */
         } while (timeo &&
@@ -2712,7 +2712,7 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
         __poll_t mask = datagram_poll(file, sock, wait);
         struct sock *sk = sock->sk;
-        if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
+        if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
                 mask |= EPOLLIN | EPOLLRDNORM;
         /* Check for false positives due to checksum errors */
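For context, the udp_poll() hunk above is what a plain user-space poll(2) on a UDP socket ends up calling; the lockless reader_queue check decides whether EPOLLIN is reported without taking the queue lock. A small illustrative user-space example (the port number and buffer size are arbitrary; only standard socket and poll calls are assumed):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in addr = {
                .sin_family = AF_INET,
                .sin_port = htons(9000),
                .sin_addr.s_addr = htonl(INADDR_ANY),
        };
        char buf[2048];

        if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                return 1;

        /* poll() lands in udp_poll(); readiness is reported based on the
         * lockless reader_queue check shown in the diff above.
         */
        struct pollfd pfd = { .fd = fd, .events = POLLIN };
        if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN)) {
                ssize_t n = recvfrom(fd, buf, sizeof(buf), 0, NULL, NULL);
                if (n >= 0)
                        printf("received %zd bytes\n", n);
        }
        close(fd);
        return 0;
}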