author    Björn Töpel <bjorn.topel@intel.com>  2020-05-20 21:21:02 +0200
committer Alexei Starovoitov <ast@kernel.org>  2020-05-21 17:31:27 -0700
commit    26062b185eee49142adc45f9aa187d909d02d961 (patch)
tree      1d73827201d1c000c6c0381b199c9521c04214a6 /net/xdp/xsk_queue.h
parent    xdp: Simplify xdp_return_{frame, frame_rx_napi, buff} (diff)
xsk: Explicitly inline functions and move definitions
In order to reduce the number of function calls, the struct
xsk_buff_pool definition is moved to xsk_buff_pool.h. The functions
xp_get_dma(), xp_dma_sync_for_cpu(), xp_dma_sync_for_device(),
xp_validate_desc() and various helper functions are explicitly
inlined.

Further, move xp_get_handle() and xp_release() to xsk.c, to allow for
the compiler to perform inlining.

rfc->v1: Make sure xp_validate_desc() is inlined for Tx perf. (Maxim)

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200520192103.355233-15-bjorn.topel@gmail.com
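For context, a minimal sketch of the pattern the commit applies (hypothetical example_pool names, not the kernel sources): moving a small helper's definition out of a .c file and into its header as static inline lets the compiler inline it at every call site, removing the per-call overhead the patch targets.

/* inline_example.c -- hypothetical sketch of the refactoring pattern,
 * not the kernel code. Before: extract_chunk() declared in a header
 * but defined in another .c file, so every call crossed a
 * translation-unit boundary. After: defined static inline in the
 * header, so each caller gets an inlined copy. */
#include <stdint.h>
#include <stdio.h>

struct example_pool {
	uint64_t chunk_mask;	/* ~(chunk_size - 1) for power-of-two chunks */
};

/* In the "after" layout this definition lives in the shared header
 * (as xp_aligned_extract_addr() does in xsk_buff_pool.h), visible to
 * the compiler at every call site. */
static inline uint64_t example_extract_chunk(const struct example_pool *p,
					     uint64_t addr)
{
	return addr & p->chunk_mask;
}

int main(void)
{
	struct example_pool pool = { .chunk_mask = ~(2048ULL - 1) };

	/* 3000 falls in the second 2 KiB chunk, which starts at 2048. */
	printf("%llu\n", (unsigned long long)example_extract_chunk(&pool, 3000));
	return 0;
}

The trade-off is a larger header and duplicated code at each call site, which is why this treatment is reserved for hot, per-descriptor paths.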
Diffstat (limited to 'net/xdp/xsk_queue.h')
-rw-r--r--  net/xdp/xsk_queue.h | 45
1 file changed, 45 insertions(+), 0 deletions(-)
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 16bf15864788..5b5d24d2dd37 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -118,6 +118,51 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
return false;
}
+static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
+ struct xdp_desc *desc)
+{
+ u64 chunk, chunk_end;
+
+ chunk = xp_aligned_extract_addr(pool, desc->addr);
+ chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len);
+ if (chunk != chunk_end)
+ return false;
+
+ if (chunk >= pool->addrs_cnt)
+ return false;
+
+ if (desc->options)
+ return false;
+ return true;
+}
+
+static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
+ struct xdp_desc *desc)
+{
+ u64 addr, base_addr;
+
+ base_addr = xp_unaligned_extract_addr(desc->addr);
+ addr = xp_unaligned_add_offset_to_addr(desc->addr);
+
+ if (desc->len > pool->chunk_size)
+ return false;
+
+ if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
+ xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
+ return false;
+
+ if (desc->options)
+ return false;
+ return true;
+}
+
+static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
+ struct xdp_desc *desc)
+{
+ return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
+ xp_aligned_validate_desc(pool, desc);
+}
+
static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
struct xdp_desc *d,
struct xdp_umem *umem)
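To make the validation rules above concrete, here is a standalone sketch of the aligned-mode check (assuming xp_aligned_extract_addr() masks the address with pool->chunk_mask, as in include/net/xsk_buff_pool.h; the chunk and umem sizes below are invented for illustration):

/* validate_demo.c -- standalone sketch of the aligned validation rule,
 * mirroring xp_aligned_validate_desc() above; not the kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_SIZE	2048ULL			/* power of two, as in aligned mode */
#define CHUNK_MASK	(~(CHUNK_SIZE - 1))
#define ADDRS_CNT	(64ULL * CHUNK_SIZE)	/* total umem bytes (invented) */

static bool aligned_valid(uint64_t addr, uint32_t len, uint32_t options)
{
	uint64_t chunk = addr & CHUNK_MASK;
	uint64_t chunk_end = (addr + len) & CHUNK_MASK;

	if (chunk != chunk_end)		/* payload may not cross a chunk boundary */
		return false;
	if (chunk >= ADDRS_CNT)		/* chunk must lie inside the umem */
		return false;
	return options == 0;		/* no descriptor option bits are accepted */
}

int main(void)
{
	/* Descriptor contained in one chunk: accepted. */
	printf("%d\n", aligned_valid(2048, 512, 0));	/* prints 1 */
	/* Descriptor crossing from chunk 0 into chunk 1: rejected. */
	printf("%d\n", aligned_valid(2000, 100, 0));	/* prints 0 */
	return 0;
}

With xp_validate_desc() and both validators defined in xsk_queue.h directly above their caller xskq_cons_is_valid_desc(), the compiler can inline the whole validation chain into the descriptor-consumption path, which is the point of the patch.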