author    Jesper Dangaard Brouer <brouer@redhat.com>    2019-06-18 15:05:27 +0200
committer David S. Miller <davem@davemloft.net>    2019-06-19 11:23:13 -0400
commit    6bf071bf09d4b2ff3ee8783531e2ce814f0870cb (patch)
tree      e0d7a1d6e896f754c77f53f66cccb7a1b2b762f4 /include/net/page_pool.h
parent    xdp: fix leak of IDA cyclic id if rhashtable_insert_slow fails (diff)
xdp: page_pool related fix to cpumap
When converting an xdp_frame into an SKB and sending it into the network stack, the underlying XDP memory model needs to release its associated resources, because the network stack has no callbacks into XDP memory models. The only memory model that needs this is page_pool, when a driver uses the DMA-mapping feature.

Introduce page_pool_release_page(), which basically does the same as page_pool_unmap_page(). Add xdp_release_frame() as the XDP memory model interface for calling it; it only does so when the memory model is MEM_TYPE_PAGE_POOL, to save the function call overhead for the other models. Have cpumap call xdp_release_frame() before xdp_scrub_frame().

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
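For illustration only, a minimal sketch of the call order described above, as it would look in a cpumap-style xdp_frame-to-SKB conversion path. The function example_xdpf_to_skb() and the SKB-filling step are hypothetical and not taken from this patch; only the xdp_release_frame()-before-xdp_scrub_frame() ordering reflects the commit message:

#include <linux/skbuff.h>
#include <net/xdp.h>

/* Sketch (not from this patch): build an SKB from an xdp_frame.
 * The SKB path has no callbacks into the XDP memory model, so any
 * page_pool resources (e.g. the DMA mapping) must be released here.
 */
static struct sk_buff *example_xdpf_to_skb(struct xdp_frame *xdpf,
					   struct sk_buff *skb)
{
	/* ... fill skb from xdpf->data / xdpf->len ... */

	/* Effectively a no-op unless xdpf->mem.type == MEM_TYPE_PAGE_POOL */
	xdp_release_frame(xdpf);

	/* The frame's memory now belongs to the SKB; clear the xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}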
Diffstat (limited to 'include/net/page_pool.h')
-rw-r--r--   include/net/page_pool.h   15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index ad218cef88c5..e240fac4c5b9 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -110,7 +110,6 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
struct page_pool *page_pool_create(const struct page_pool_params *params);
void page_pool_destroy(struct page_pool *pool);
-void page_pool_unmap_page(struct page_pool *pool, struct page *page);
/* Never call this directly, use helpers below */
void __page_pool_put_page(struct page_pool *pool,
@@ -133,6 +132,20 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
	__page_pool_put_page(pool, page, true);
}
+/* Disconnects a page (from a page_pool). API users can have a need
+ * to disconnect a page (from a page_pool), to allow it to be used as
+ * a regular page (that will eventually be returned to the normal
+ * page-allocator via put_page).
+ */
+void page_pool_unmap_page(struct page_pool *pool, struct page *page);
+static inline void page_pool_release_page(struct page_pool *pool,
+					  struct page *page)
+{
+#ifdef CONFIG_PAGE_POOL
+	page_pool_unmap_page(pool, page);
+#endif
+}
+
static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	return page->dma_addr;
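For context, a hedged usage sketch of the new page_pool_release_page() helper; the driver-side function example_pp_attach_frag() and its parameters are hypothetical and not part of this patch. Once released from the pool, the page behaves as a regular page and is eventually freed through put_page() when the stack drops its reference.

#include <linux/skbuff.h>
#include <net/page_pool.h>

/* Sketch (not from this patch): hand a page_pool page to the stack as
 * a regular page. Releasing it from the pool undoes the pool's state
 * (e.g. the DMA mapping when the DMA-mapping feature is used), so the
 * normal put_page() path is a safe way to free it later.
 */
static void example_pp_attach_frag(struct page_pool *pool, struct page *page,
				   struct sk_buff *skb, unsigned int offset,
				   unsigned int len)
{
	page_pool_release_page(pool, page);

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			PAGE_SIZE);
}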