author    David S. Miller <davem@davemloft.net>  2019-10-28 13:33:41 -0700
committer David S. Miller <davem@davemloft.net>  2019-10-28 13:33:41 -0700
commit    2024305863d626bd44664c284fd609b6a56bb9ed (patch)
tree      961563fa2222cd852303abc9eb321a95fed5d774 /include
parent    Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf (diff)
parent    net: add READ_ONCE() annotation in __skb_wait_for_more_packets() (diff)
Merge branch 'net-avoid-KCSAN-splats'
Eric Dumazet says:

====================
net: avoid KCSAN splats

Often we use skb_queue_empty() without holding a lock, meaning that
other CPUs (or interrupts) can change the queue under us. This is
fine, but we need to properly annotate the lockless intent to make
sure the compiler won't over-optimize things.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
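As a minimal sketch of the pattern this series annotates (not code from
the patch; queue_has_data() is a hypothetical caller), the point is that
a lockless read of list->next must go through READ_ONCE() so the
compiler emits exactly one ordinary load:

    #include <linux/skbuff.h>

    /* Hypothetical helper illustrating the lockless pattern being
     * annotated; it is not a function added by this series. */
    static bool queue_has_data(const struct sk_buff_head *list)
    {
            /*
             * A plain "list->next != (struct sk_buff *)list" test may
             * be refetched or torn by the compiler while another CPU
             * updates the queue, which KCSAN reports as a data race.
             * READ_ONCE() forces a single load and documents the
             * intentionally racy access.
             */
            return READ_ONCE(list->next) != (const struct sk_buff *)list;
    }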
Diffstat (limited to 'include')
-rw-r--r--  include/linux/skbuff.h | 33
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a391147c03d4..64a395c7f689 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1496,6 +1496,19 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
}
/**
+ * skb_queue_empty_lockless - check if a queue is empty
+ * @list: queue head
+ *
+ * Returns true if the queue is empty, false otherwise.
+ * This variant can be used in lockless contexts.
+ */
+static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
+{
+ return READ_ONCE(list->next) == (const struct sk_buff *) list;
+}
+
+
+/**
* skb_queue_is_last - check if skb is the last entry in the queue
* @list: queue head
* @skb: buffer
@@ -1848,9 +1861,11 @@ static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{
- newsk->next = next;
- newsk->prev = prev;
- next->prev = prev->next = newsk;
+ /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+ WRITE_ONCE(newsk->next, next);
+ WRITE_ONCE(newsk->prev, prev);
+ WRITE_ONCE(next->prev, newsk);
+ WRITE_ONCE(prev->next, newsk);
list->qlen++;
}
@@ -1861,11 +1876,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list,
struct sk_buff *first = list->next;
struct sk_buff *last = list->prev;
- first->prev = prev;
- prev->next = first;
+ WRITE_ONCE(first->prev, prev);
+ WRITE_ONCE(prev->next, first);
- last->next = next;
- next->prev = last;
+ WRITE_ONCE(last->next, next);
+ WRITE_ONCE(next->prev, last);
}
/**
@@ -2006,8 +2021,8 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
next = skb->next;
prev = skb->prev;
skb->next = skb->prev = NULL;
- next->prev = prev;
- prev->next = next;
+ WRITE_ONCE(next->prev, prev);
+ WRITE_ONCE(prev->next, next);
}
/**
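For context, a hedged sketch of how the new helper is intended to be
used, e.g. from a poll()-style path that peeks at a receive queue
without taking its lock (sock_poll_sketch() is illustrative only, not
part of this merge):

    #include <linux/poll.h>
    #include <linux/skbuff.h>
    #include <net/sock.h>

    static __poll_t sock_poll_sketch(struct sock *sk)
    {
            __poll_t mask = 0;

            /* Lockless peek: other CPUs may queue packets concurrently.
             * The READ_ONCE() inside skb_queue_empty_lockless() pairs
             * with the WRITE_ONCE() stores in __skb_insert() and
             * __skb_unlink() above, so the pointer is read exactly once. */
            if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                    mask |= EPOLLIN | EPOLLRDNORM;

            return mask;
    }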