path: root/net/xdp/xsk_queue.h
author     Magnus Karlsson <magnus.karlsson@intel.com>  2019-12-19 13:39:23 +0100
committer  Alexei Starovoitov <ast@kernel.org>          2019-12-20 16:00:09 -0800
commit     59e35e552529b858f35b30bc5a803ea532ca17f1 (patch)
tree       bfb1cc330e07c1a4b544dc0dddeb6d735985b8ed /net/xdp/xsk_queue.h
parent     xsk: Consolidate to one single cached producer pointer (diff)
download   linux-dev-59e35e552529b858f35b30bc5a803ea532ca17f1.tar.xz
           linux-dev-59e35e552529b858f35b30bc5a803ea532ca17f1.zip
xsk: Standardize naming of producer ring access functions
Adopt a naming convention for the producer ring access functions similar to the one used in libbpf, but adapted to the kernel: you first reserve a number of entries that you later submit to the global state of the ring. This is much clearer, IMO, than the scheme the kernel used before. Once renamed, we also discover that two functions are actually the same, so remove one of them. Some of the primitive ring submission operations are also the same, so break these out into __xskq_prod_submit that the upper-level ring access functions can use.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/1576759171-28550-5-git-send-email-magnus.karlsson@intel.com
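
For context (not part of the commit itself), the reserve/submit split the message describes can be illustrated with a minimal, self-contained userspace sketch. The struct and function names below are invented stand-ins that only mirror the patched helpers (xskq_prod_reserve_addr()/xskq_prod_submit()); the C11 release store plays the role of the kernel's smp_wmb() + WRITE_ONCE() pair in __xskq_prod_submit():

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8 /* power of two, like the kernel rings */

/* Toy stand-ins for the kernel's xdp_umem_ring / xsk_queue pair. */
struct toy_ring {
	_Atomic uint32_t producer;   /* shared with the consumer side */
	_Atomic uint32_t consumer;   /* shared with the producer side */
	uint64_t desc[RING_SIZE];
};

struct toy_queue {
	uint32_t cached_prod;        /* producer's private copy */
	uint32_t cached_cons;        /* cached view of the consumer */
	uint32_t ring_mask;
	struct toy_ring *ring;
};

/* "Reserve": write the descriptor and advance only the private
 * cached producer index; the consumer cannot see it yet. */
static int toy_prod_reserve_addr(struct toy_queue *q, uint64_t addr)
{
	uint32_t free_entries = q->cached_cons + RING_SIZE - q->cached_prod;

	if (free_entries == 0) {
		/* Refresh the cached consumer pointer and retry once. */
		q->cached_cons = atomic_load_explicit(&q->ring->consumer,
						      memory_order_acquire);
		free_entries = q->cached_cons + RING_SIZE - q->cached_prod;
		if (free_entries == 0)
			return -1; /* ring full */
	}

	q->ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

/* "Submit": publish everything reserved so far with one release
 * store, making the whole batch visible to the consumer at once. */
static void toy_prod_submit(struct toy_queue *q)
{
	atomic_store_explicit(&q->ring->producer, q->cached_prod,
			      memory_order_release);
}

int main(void)
{
	struct toy_ring ring = { 0 };
	struct toy_queue q = { .ring_mask = RING_SIZE - 1, .ring = &ring };

	/* Reserve and fill a small batch of descriptors... */
	for (uint64_t addr = 0x1000; addr < 0x4000; addr += 0x1000)
		if (toy_prod_reserve_addr(&q, addr))
			break;

	/* ...then submit the batch in a single step. */
	toy_prod_submit(&q);

	printf("producer = %u\n", (unsigned)atomic_load(&ring.producer));
	return 0;
}

Reserving only moves the private cached_prod copy; nothing becomes visible to the consumer until the single producer-pointer store in the submit step, which is the behaviour the patch splits between the xskq_prod_reserve*() and xskq_prod_submit*() helpers.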
Diffstat (limited to 'net/xdp/xsk_queue.h')
-rw-r--r--  net/xdp/xsk_queue.h | 58
1 file changed, 27 insertions(+), 31 deletions(-)
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 7ad80748f209..1b9a350f2e66 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -216,22 +216,17 @@ static inline void xskq_discard_addr(struct xsk_queue *q)
q->cons_tail++;
}
-static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
+static inline int xskq_prod_reserve(struct xsk_queue *q)
{
- struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
- unsigned int idx = q->ring->producer;
+ if (xskq_nb_free(q, 1) == 0)
+ return -ENOSPC;
/* A, matches D */
- ring->desc[idx++ & q->ring_mask] = addr;
-
- /* Order producer and data */
- smp_wmb(); /* B, matches C */
-
- WRITE_ONCE(q->ring->producer, idx);
+ q->cached_prod++;
return 0;
}
-static inline int xskq_produce_addr_lazy(struct xsk_queue *q, u64 addr)
+static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
@@ -243,23 +238,32 @@ static inline int xskq_produce_addr_lazy(struct xsk_queue *q, u64 addr)
return 0;
}
-static inline void xskq_produce_flush_addr_n(struct xsk_queue *q,
- u32 nb_entries)
+static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
/* Order producer and data */
smp_wmb(); /* B, matches C */
- WRITE_ONCE(q->ring->producer, q->ring->producer + nb_entries);
+ WRITE_ONCE(q->ring->producer, idx);
}
-static inline int xskq_reserve_addr(struct xsk_queue *q)
+static inline void xskq_prod_submit(struct xsk_queue *q)
{
- if (xskq_nb_free(q, 1) == 0)
- return -ENOSPC;
+ __xskq_prod_submit(q, q->cached_prod);
+}
- /* A, matches D */
- q->cached_prod++;
- return 0;
+static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
+{
+ struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
+ u32 idx = q->ring->producer;
+
+ ring->desc[idx++ & q->ring_mask] = addr;
+
+ __xskq_prod_submit(q, idx);
+}
+
+static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
+{
+ __xskq_prod_submit(q, q->ring->producer + nb_entries);
}
/* Rx/Tx queue */
@@ -330,11 +334,11 @@ static inline void xskq_discard_desc(struct xsk_queue *q)
q->cons_tail++;
}
-static inline int xskq_produce_batch_desc(struct xsk_queue *q,
- u64 addr, u32 len)
+static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
+ u64 addr, u32 len)
{
struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
- unsigned int idx;
+ u32 idx;
if (xskq_nb_free(q, 1) == 0)
return -ENOSPC;
@@ -347,14 +351,6 @@ static inline int xskq_produce_batch_desc(struct xsk_queue *q,
return 0;
}
-static inline void xskq_produce_flush_desc(struct xsk_queue *q)
-{
- /* Order producer and data */
- smp_wmb(); /* B, matches C */
-
- WRITE_ONCE(q->ring->producer, q->cached_prod);
-}
-
static inline bool xskq_full_desc(struct xsk_queue *q)
{
/* No barriers needed since data is not accessed */
@@ -362,7 +358,7 @@ static inline bool xskq_full_desc(struct xsk_queue *q)
q->nentries;
}
-static inline bool xskq_empty_desc(struct xsk_queue *q)
+static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
/* No barriers needed since data is not accessed */
return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);