author    Magnus Karlsson <magnus.karlsson@intel.com>    2022-08-12 13:32:59 +0200
committer Daniel Borkmann <daniel@iogearbox.net>         2022-08-15 17:26:07 +0200
commit    58ca14ed98c87cfe0d1408cc65a9745d9e9b7a56 (patch)
tree      b5147c832716f50fccee90819d5cab9c3a983b7e /net/xdp
parent    selftests/bpf: Add lru_bug to s390x deny list (diff)
xsk: Fix corrupted packets for XDP_SHARED_UMEM
Fix an issue in XDP_SHARED_UMEM mode together with aligned mode where packets are corrupted for the second and any further sockets bound to the same umem. In other words, this does not affect the first socket bound to the umem. The culprit for this bug is that the initialization of the DMA addresses for the pre-populated xsk buffer pool entries was not performed for any socket but the first one bound to the umem. Only the linear array of DMA addresses was populated. Fix this by populating the DMA addresses in the xsk buffer pool for every socket bound to the same umem.

Fixes: 94033cd8e73b8 ("xsk: Optimize for aligned case")
Reported-by: Alasdair McWilliam <alasdair.mcwilliam@outlook.com>
Reported-by: Intrusion Shield Team <dnevil@intrusion.com>
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Tested-by: Alasdair McWilliam <alasdair.mcwilliam@outlook.com>
Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Link: https://lore.kernel.org/xdp-newbies/6205E10C-292E-4995-9D10-409649354226@outlook.com/
Link: https://lore.kernel.org/bpf/20220812113259.531-1-magnus.karlsson@gmail.com
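[Editor's note: the following is a minimal userspace sketch of the failure mode, not kernel code. fake_pool, fake_xskb, init_xskb_dma(), HEADS_CNT, and the addresses below are hypothetical stand-ins for struct xsk_buff_pool, struct xdp_buff_xsk, xp_init_xskb_dma(), pool->heads_cnt, and real DMA mappings. It models how each buffer's cached DMA address is derived from the umem's linear dma_pages array, and why a second pool bound to the same umem was left with stale zero addresses when that per-buffer step was skipped.]

/*
 * Minimal userspace sketch of the bug, not kernel code: fake_pool,
 * fake_xskb, init_xskb_dma(), and the constants are hypothetical
 * stand-ins for the real xsk buffer pool structures and helpers.
 */
#include <stdint.h>
#include <stdio.h>

#define HEADS_CNT  4
#define PAGE_SHIFT 12

struct fake_xskb {
        uint64_t orig_addr; /* buffer offset within the umem */
        uint64_t dma;       /* cached per-buffer DMA address */
};

struct fake_pool {
        struct fake_xskb heads[HEADS_CNT];
};

/* Derive a buffer's DMA address from the linear per-page DMA table,
 * conceptually what xp_init_xskb_dma() does for a real pool entry. */
static void init_xskb_dma(struct fake_xskb *xskb, const uint64_t *dma_pages)
{
        xskb->dma = dma_pages[xskb->orig_addr >> PAGE_SHIFT] +
                    (xskb->orig_addr & ((1u << PAGE_SHIFT) - 1));
}

int main(void)
{
        /* The umem's linear DMA-address array: this part was always
         * populated, even for further sockets bound to the same umem. */
        uint64_t dma_pages[HEADS_CNT] = { 0x1000, 0x2000, 0x3000, 0x4000 };
        struct fake_pool second = { 0 }; /* pool of a second socket */
        uint32_t i;

        for (i = 0; i < HEADS_CNT; i++)
                second.heads[i].orig_addr = (uint64_t)i << PAGE_SHIFT;

        /* Before the fix, only the first pool bound to the umem ran this
         * per-buffer loop; for any further pool, heads[i].dma stayed 0 and
         * the device DMA-ed to/from the wrong addresses, corrupting
         * packets. The fix moves the loop into xp_init_dma_info(), which
         * runs for every pool bound to the umem in aligned mode. */
        for (i = 0; i < HEADS_CNT; i++)
                init_xskb_dma(&second.heads[i], dma_pages);

        for (i = 0; i < HEADS_CNT; i++)
                printf("xskb %u: dma=0x%llx\n", i,
                       (unsigned long long)second.heads[i].dma);
        return 0;
}

[The design point the sketch illustrates: the linear dma_pages array alone is not enough in aligned mode, because each xdp_buff_xsk caches its own DMA address; that cache must be initialized per pool, not per umem.]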
Diffstat (limited to 'net/xdp')
-rw-r--r--  net/xdp/xsk_buff_pool.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index f70112176b7c..a71a8c6edf55 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -379,6 +379,16 @@ static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
 
 static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
 {
+        if (!pool->unaligned) {
+                u32 i;
+
+                for (i = 0; i < pool->heads_cnt; i++) {
+                        struct xdp_buff_xsk *xskb = &pool->heads[i];
+
+                        xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
+                }
+        }
+
         pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
         if (!pool->dma_pages)
                 return -ENOMEM;
@@ -428,12 +438,6 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 
         if (pool->unaligned)
                 xp_check_dma_contiguity(dma_map);
-        else
-                for (i = 0; i < pool->heads_cnt; i++) {
-                        struct xdp_buff_xsk *xskb = &pool->heads[i];
-
-                        xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
-                }
 
         err = xp_init_dma_info(pool, dma_map);
         if (err) {