path: root/include/net/page_pool/helpers.h
author	Mina Almasry <almasrymina@google.com>	2024-12-11 21:20:31 +0000
committer	Jakub Kicinski <kuba@kernel.org>	2024-12-12 18:49:08 -0800
commit	7dba339faae991a23c54f7b93a58798c58f8c16f (patch)
tree	e6cdfdf0c2deb471a5e640b5c652005b456aab79 /include/net/page_pool/helpers.h
parent	page_pool: Set `dma_sync` to false for devmem memory provider (diff)
page_pool: disable sync for cpu for dmabuf memory provider
dmabuf dma-addresses should not be dma_sync'd for CPU/device. Typically it is the driver's responsibility to dma_sync for CPU, but the driver should not dma_sync for CPU if the netmem is actually coming from a dmabuf memory provider.

The page_pool already exposes a helper for dma_sync_for_cpu: page_pool_dma_sync_for_cpu. Upgrade this existing helper to handle netmem, and have it skip the dma_sync if the memory is from a dmabuf memory provider. Drivers should migrate to using this helper when adding support for netmem.

Also minimize the impact on the dma-syncing performance for pages: special-case the dma-sync path for pages so that it does not go through the overhead of the dma-sync checks and the conversion to netmem.

Cc: Alexander Lobakin <aleksander.lobakin@intel.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Signed-off-by: Mina Almasry <almasrymina@google.com>
Link: https://patch.msgid.link/20241211212033.1684197-5-almasrymina@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
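As a usage illustration (not part of this patch), here is a minimal sketch of how a netmem-capable driver's Rx path might call the upgraded helper. The my_rx_buf structure and my_drv_rx_packet() function are hypothetical; only netmem_ref and page_pool_dma_sync_netmem_for_cpu() come from the page_pool API:

/* Hypothetical Rx buffer bookkeeping in a netmem-aware driver. */
struct my_rx_buf {
	netmem_ref netmem;	/* buffer handed out by the page_pool */
	u32 offset;		/* start of the received data within it */
};

static void my_drv_rx_packet(struct page_pool *pool, struct my_rx_buf *buf,
			     u32 len)
{
	/* Sync the received bytes for CPU access before touching them.
	 * When the pool is backed by a dmabuf memory provider,
	 * pool->dma_sync_for_cpu is false and this call is a no-op, so
	 * the driver needs no per-memory-provider special casing.
	 */
	page_pool_dma_sync_netmem_for_cpu(pool, buf->netmem, buf->offset, len);

	/* ... parse headers / build the skb from the synced region ... */
}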
Diffstat (limited to 'include/net/page_pool/helpers.h')
-rw-r--r--	include/net/page_pool/helpers.h	35
1 file changed, 30 insertions(+), 5 deletions(-)
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 95af7f0b029e..e555921e5233 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -422,7 +422,21 @@ static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
  */
 static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
 {
-	return page_pool_get_dma_addr_netmem(page_to_netmem((struct page *)page));
+	dma_addr_t ret = page->dma_addr;
+
+	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
+		ret <<= PAGE_SHIFT;
+
+	return ret;
+}
+
+static inline void __page_pool_dma_sync_for_cpu(const struct page_pool *pool,
+						const dma_addr_t dma_addr,
+						u32 offset, u32 dma_sync_size)
+{
+	dma_sync_single_range_for_cpu(pool->p.dev, dma_addr,
+				      offset + pool->p.offset, dma_sync_size,
+				      page_pool_get_dma_dir(pool));
 }
 
 /**
@@ -441,10 +455,21 @@ static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
 					      const struct page *page,
 					      u32 offset, u32 dma_sync_size)
 {
-	dma_sync_single_range_for_cpu(pool->p.dev,
-				      page_pool_get_dma_addr(page),
-				      offset + pool->p.offset, dma_sync_size,
-				      page_pool_get_dma_dir(pool));
+	__page_pool_dma_sync_for_cpu(pool, page_pool_get_dma_addr(page), offset,
+				     dma_sync_size);
+}
+
+static inline void
+page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool,
+				  const netmem_ref netmem, u32 offset,
+				  u32 dma_sync_size)
+{
+	if (!pool->dma_sync_for_cpu)
+		return;
+
+	__page_pool_dma_sync_for_cpu(pool,
+				     page_pool_get_dma_addr_netmem(netmem),
+				     offset, dma_sync_size);
 }
 
 static inline bool page_pool_put(struct page_pool *pool)
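Usage note (not part of the patch): the page helper keeps its unconditional fast path, while the netmem helper defers the decision to the pool. A minimal contrast, assuming pool, page, netmem, offset and len are in scope:

	/* Page path: unchanged for existing drivers, always performs the sync. */
	page_pool_dma_sync_for_cpu(pool, page, offset, len);

	/* Netmem path: skipped entirely when the memory comes from a dmabuf
	 * memory provider (pool->dma_sync_for_cpu is false), synced otherwise.
	 */
	page_pool_dma_sync_netmem_for_cpu(pool, netmem, offset, len);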