-rw-r--r--  include/net/xdp_sock.h      | 13
-rw-r--r--  include/net/xsk_buff_pool.h | 27
2 files changed, 22 insertions(+), 18 deletions(-)
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 282aeba0d20f..1a9559c0cbdd 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -23,13 +23,13 @@ struct xdp_umem {
u32 headroom;
u32 chunk_size;
u32 chunks;
+ u32 npgs;
struct user_struct *user;
refcount_t users;
- struct page **pgs;
- u32 npgs;
u8 flags;
- int id;
bool zc;
+ struct page **pgs;
+ int id;
struct list_head xsk_dma_list;
};

@@ -42,7 +42,7 @@ struct xsk_map {
struct xdp_sock {
/* struct sock must be the first member of struct xdp_sock */
struct sock sk;
- struct xsk_queue *rx;
+ struct xsk_queue *rx ____cacheline_aligned_in_smp;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
@@ -54,8 +54,7 @@ struct xdp_sock {
XSK_BOUND,
XSK_UNBOUND,
} state;
- /* Protects multiple processes in the control path */
- struct mutex mutex;
+
struct xsk_queue *tx ____cacheline_aligned_in_smp;
struct list_head tx_list;
/* Mutual exclusion of NAPI TX thread and sendmsg error paths
@@ -72,6 +71,8 @@ struct xdp_sock {
struct list_head map_list;
/* Protects map_list */
spinlock_t map_list_lock;
+ /* Protects multiple processes in the control path */
+ struct mutex mutex;
struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};
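
The xdp_sock change has two parts: the control-path mutex moves out of the region between the rx and tx rings, and rx gets ____cacheline_aligned_in_smp so the data-path members start on a fresh cache line. In the kernel, ____cacheline_aligned_in_smp (from <linux/cache.h>) expands on SMP builds to an aligned attribute using SMP_CACHE_BYTES. Below is a minimal userspace sketch of the same idea, assuming a 64-byte cache line; the struct and member names are illustrative, not the kernel's:

/* hotcold.c - illustrative only; mirrors what
 * ____cacheline_aligned_in_smp does to member placement. */
#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64	/* assumption: SMP_CACHE_BYTES on x86-64 */
#define cacheline_aligned __attribute__((__aligned__(CACHELINE)))

struct xsk_demo {
	/* control-path (cold) member */
	int state;
	/* hot members start on their own cache line, so control-path
	 * writes to the members above never dirty the line that the
	 * data path keeps warm in cache */
	void *rx cacheline_aligned;
	void *dev;
};

int main(void)
{
	/* prints 64: the attribute padded the struct so rx begins
	 * exactly at a cache-line boundary */
	printf("offsetof(rx) = %zu\n", offsetof(struct xsk_demo, rx));
	return 0;
}

Without the attribute, rx would land at offset 8 and share a line with state. With it, sizeof(struct xsk_demo) also rounds up to a multiple of 64, so consecutive instances stay line-aligned.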
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 356d0ac74eba..38d03a64c9ea 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -39,9 +39,22 @@ struct xsk_dma_map {
};

struct xsk_buff_pool {
- struct xsk_queue *fq;
- struct xsk_queue *cq;
+ /* Members only used in the control path first. */
+ struct device *dev;
+ struct net_device *netdev;
+ struct list_head xsk_tx_list;
+ /* Protects modifications to the xsk_tx_list */
+ spinlock_t xsk_tx_list_lock;
+ refcount_t users;
+ struct xdp_umem *umem;
+ struct work_struct work;
struct list_head free_list;
+ u32 heads_cnt;
+ u16 queue_id;
+
+ /* Data path members as close to free_heads at the end as possible. */
+ struct xsk_queue *fq ____cacheline_aligned_in_smp;
+ struct xsk_queue *cq;
/* For performance reasons, each buff pool has its own array of dma_pages
* even when they are identical.
*/
@@ -51,25 +64,15 @@ struct xsk_buff_pool {
u64 addrs_cnt;
u32 free_list_cnt;
u32 dma_pages_cnt;
- u32 heads_cnt;
u32 free_heads_cnt;
u32 headroom;
u32 chunk_size;
u32 frame_len;
- u16 queue_id;
u8 cached_need_wakeup;
bool uses_need_wakeup;
bool dma_need_sync;
bool unaligned;
- struct xdp_umem *umem;
void *addrs;
- struct device *dev;
- struct net_device *netdev;
- struct list_head xsk_tx_list;
- /* Protects modifications to the xsk_tx_list */
- spinlock_t xsk_tx_list_lock;
- refcount_t users;
- struct work_struct work;
struct xdp_buff_xsk *free_heads[];
};
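
The xsk_buff_pool comment explains the other half of the layout strategy: free_heads[] is a flexible array member, so it begins exactly where the struct ends, and the hottest scalars are pushed to the tail of the struct so they share cache lines with the first free_heads entries that the data path pops and pushes. A rough sketch of the effect, again with illustrative names and a 64-byte line assumed:

/* pool_layout.c - illustrative only; shows why hot members sit
 * directly before a flexible array member. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_pool {
	/* cold, control-path members first */
	long cold[8];
	/* hot members last: they land on the same cache line as the
	 * first entries of free_heads[] below */
	unsigned int free_heads_cnt;
	void *free_heads[];	/* starts right at sizeof(struct demo_pool) */
};

int main(void)
{
	struct demo_pool *p = malloc(sizeof(*p) + 8 * sizeof(void *));
	if (!p)
		return 1;
	p->free_heads_cnt = 0;
	/* prints 64 and 72: the counter and the array share a line */
	printf("free_heads_cnt at %zu, free_heads at %zu\n",
	       offsetof(struct demo_pool, free_heads_cnt),
	       offsetof(struct demo_pool, free_heads));
	free(p);
	return 0;
}

To check the real layout against this intent (assuming a kernel built with debug info), pahole prints the struct with cache-line boundaries marked, e.g. pahole -C xsk_buff_pool vmlinux.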