author     Mauro Carvalho Chehab <mchehab@s-opensource.com> 2017-07-17 11:17:36 -0300
committer  Mauro Carvalho Chehab <mchehab@s-opensource.com> 2017-07-17 11:17:36 -0300
commit     a3db9d60a118571e696b684a6e8c692a2b064941 (patch)
tree       ff7bae0f79b7a2ee0bce03de4f883550200c52a9 /include/linux/skb_array.h
parent     media: staging: cxd2099: Activate cxd2099 buffer mode (diff)
parent     Linux v4.13-rc1 (diff)
Merge tag 'v4.13-rc1' into patchwork
Linux v4.13-rc1

* tag 'v4.13-rc1': (11136 commits)
  Linux v4.13-rc1
  random: reorder READ_ONCE() in get_random_uXX
  random: suppress spammy warnings about unseeded randomness
  replace incorrect strscpy use in FORTIFY_SOURCE
  kmod: throttle kmod thread limit
  kmod: add test driver to stress test the module loader
  MAINTAINERS: give kmod some maintainer love
  xtensa: use generic fb.h
  fault-inject: add /proc/<pid>/fail-nth
  fault-inject: simplify access check for fail-nth
  fault-inject: make fail-nth read/write interface symmetric
  fault-inject: parse as natural 1-based value for fail-nth write interface
  fault-inject: automatically detect the number base for fail-nth write interface
  kernel/watchdog.c: use better pr_fmt prefix
  MAINTAINERS: move the befs tree to kernel.org
  lib/atomic64_test.c: add a test that atomic64_inc_not_zero() returns an int
  mm: fix overflow check in expand_upwards()
  ubifs: Set double hash cookie also for RENAME_EXCHANGE
  ubifs: Massage assert in ubifs_xattr_set() wrt. init_xattrs
  ubifs: Don't leak kernel memory to the MTD
  ...
Diffstat (limited to 'include/linux/skb_array.h')
-rw-r--r--  include/linux/skb_array.h | 31
1 file changed, 31 insertions, 0 deletions
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index f4dfade428f0..35226cd4efb0 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -97,21 +97,46 @@ static inline struct sk_buff *skb_array_consume(struct skb_array *a)
return ptr_ring_consume(&a->ring);
}
+static inline int skb_array_consume_batched(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched(&a->ring, (void **)array, n);
+}
+
static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
return ptr_ring_consume_irq(&a->ring);
}
+static inline int skb_array_consume_batched_irq(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
+}
+
static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
return ptr_ring_consume_any(&a->ring);
}
+static inline int skb_array_consume_batched_any(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
+}
+
+
static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
return ptr_ring_consume_bh(&a->ring);
}
+static inline int skb_array_consume_batched_bh(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
+}
+
static inline int __skb_array_len_with_tag(struct sk_buff *skb)
{
if (likely(skb)) {
@@ -156,6 +181,12 @@ static void __skb_array_destroy_skb(void *ptr)
kfree_skb(ptr);
}
+static inline void skb_array_unconsume(struct skb_array *a,
+ struct sk_buff **skbs, int n)
+{
+ ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
+}
+
static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
{
return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
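
The wrappers added above delegate directly to the ptr_ring batched API, letting
a consumer dequeue several skbs per ring operation instead of calling
skb_array_consume() once per packet, and hand any unprocessed tail back with
skb_array_unconsume(). A minimal usage sketch follows; it is not part of this
commit, and the BATCH size and the process_skb() helper are hypothetical
stand-ins for a driver's real receive path:

#include <linux/skbuff.h>
#include <linux/skb_array.h>

#define BATCH 16

/* Hypothetical per-packet consumer; here it simply frees the skb. */
static bool process_skb(struct sk_buff *skb)
{
	consume_skb(skb);
	return true;
}

static void drain_skb_array(struct skb_array *a)
{
	struct sk_buff *skbs[BATCH];
	int n, i;

	/* One call dequeues up to BATCH skbs from the ring. */
	while ((n = skb_array_consume_batched(a, skbs, BATCH)) > 0) {
		for (i = 0; i < n; i++) {
			if (process_skb(skbs[i]))
				continue;
			/*
			 * Back-pressure: return the unprocessed tail to the
			 * ring. Per ptr_ring_unconsume(), entries that no
			 * longer fit are freed via __skb_array_destroy_skb(),
			 * which is why the wrapper returns void.
			 */
			skb_array_unconsume(a, skbs + i, n - i);
			return;
		}
	}
}

Batching amortizes the consumer-side locking and cache traffic of the
underlying ptr_ring across up to BATCH packets, which is the point of adding
these wrappers alongside the existing single-skb consume variants.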