
[v1,07/15] net: page pool: add helper creating area from pages

Message ID 20241007221603.1703699-8-dw@davidwei.uk
State Superseded
Delegated to: Netdev Maintainers
Series io_uring zero copy rx

Commit Message

David Wei Oct. 7, 2024, 10:15 p.m. UTC
From: Pavel Begunkov <asml.silence@gmail.com>

Add a helper that takes an array of pages and initialises the passed-in
memory provider's area with them, with each net_iov taking one page.
It is also responsible for setting up DMA mappings.

We keep it in page_pool.c so as not to leak netmem details to outside
providers like io_uring, which don't have access to netmem_priv.h
and other private helpers.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: David Wei <dw@davidwei.uk>
---
 include/net/page_pool/types.h | 17 ++++++++++
 net/core/page_pool.c          | 61 +++++++++++++++++++++++++++++++++--
 2 files changed, 76 insertions(+), 2 deletions(-)
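
As a usage sketch only (not part of the patch): a provider outside
net/core/ sees just the two exported prototypes, so a call sequence
could look like the following. The function name and the surrounding
lifecycle are hypothetical; struct net_iov_area comes from an earlier
patch in this series.

#include <net/netmem.h>			/* net_iov; net_iov_area (earlier in this series) */
#include <net/page_pool/types.h>	/* prototypes added by this patch */

/* Hypothetical provider-side lifecycle: map a pre-pinned page array into
 * the provider's area (one page per net_iov), use it, then unmap.
 */
static int provider_area_lifecycle(struct page_pool *pool,
				   struct net_iov_area *area,
				   struct page **pages)
{
	int ret;

	/* Sets pp_info on every net_iov and DMA-maps each page; returns
	 * -EOPNOTSUPP if the pool was created without PP_FLAG_DMA_MAP,
	 * and unwinds its own mappings on failure.
	 */
	ret = page_pool_init_paged_area(pool, area, pages);
	if (ret)
		return ret;

	/* ... hand the net_iovs to the page pool / rx path ... */

	/* Drop the DMA mappings taken above. */
	page_pool_release_area(pool, area);
	return 0;
}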

Comments

Mina Almasry Oct. 9, 2024, 9:11 p.m. UTC | #1
On Mon, Oct 7, 2024 at 3:16 PM David Wei <dw@davidwei.uk> wrote:
>
> From: Pavel Begunkov <asml.silence@gmail.com>
>
> Add a helper that takes an array of pages and initialises the passed-in
> memory provider's area with them, with each net_iov taking one page.
> It is also responsible for setting up DMA mappings.
>
> We keep it in page_pool.c so as not to leak netmem details to outside
> providers like io_uring, which don't have access to netmem_priv.h
> and other private helpers.
>

My initial feeling is that this belongs somewhere in the provider. The
functions added here don't seem generically useful to the page pool,
to be honest.

The challenge seems to be the netmem/net_iov dependencies. The only
things I see you calling are net_iov_to_netmem() and friends. Are those
the issue? I think those are in netmem.h, actually. Consider including
that header in the provider implementation, if it makes sense to you.
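
(As a rough sketch of that suggestion, assuming net_iov_to_netmem() is
the only conversion the provider ends up needing; the helper below is
made up for illustration:)

#include <net/netmem.h>

/* Hypothetical provider-side conversion using only the public header. */
static netmem_ref provider_area_netmem(struct net_iov_area *area,
				       unsigned int i)
{
	return net_iov_to_netmem(&area->niovs[i]);
}
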
Pavel Begunkov Oct. 9, 2024, 9:34 p.m. UTC | #2
On 10/9/24 22:11, Mina Almasry wrote:
> On Mon, Oct 7, 2024 at 3:16 PM David Wei <dw@davidwei.uk> wrote:
>>
>> From: Pavel Begunkov <asml.silence@gmail.com>
>>
>> Add a helper that takes an array of pages and initialises the passed-in
>> memory provider's area with them, with each net_iov taking one page.
>> It is also responsible for setting up DMA mappings.
>>
>> We keep it in page_pool.c so as not to leak netmem details to outside
>> providers like io_uring, which don't have access to netmem_priv.h
>> and other private helpers.
>>
> 
> My initial feeling is that this belongs somewhere in the provider. The
> functions added here don't seem generically useful to the page pool,
> to be honest.
> 
> The challenge seems to be the netmem/net_iov dependencies. The only
> things I see you calling are net_iov_to_netmem() and friends. Are those
> the issue? I think those are in netmem.h, actually. Consider including
> that header in the provider implementation, if it makes sense to you.

io_uring would need bits from netmem_priv.h and page_pool_priv.h,
and Jakub was pushing hard for the devmem patches to hide all of that
under net/core/. That change only went in last week; I believe Jakub
doesn't want any of those headers leaked outside net/, in which case
net/ needs to provide a helper.
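
(To illustrate the layering, a minimal provider-side sketch; the file
placement and function name are hypothetical:)

/* Provider code (e.g. under io_uring/) only sees the public headers, so
 * per-net_iov pp_info setup and DMA mapping must go through the exported
 * helper rather than net/core/'s netmem_priv.h / page_pool_priv.h.
 */
#include <net/netmem.h>
#include <net/page_pool/types.h>

static int provider_map_area(struct page_pool *pool,
			     struct net_iov_area *area, struct page **pages)
{
	/* Implemented in net/core/page_pool.c, next to the private helpers. */
	return page_pool_init_paged_area(pool, area, pages);
}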

Patch

diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index fd0376ad0d26..1180ad07423c 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -271,6 +271,11 @@  void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
 			   const struct xdp_mem_info *mem);
 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 			     int count);
+
+int page_pool_init_paged_area(struct page_pool *pool,
+			      struct net_iov_area *area, struct page **pages);
+void page_pool_release_area(struct page_pool *pool,
+			    struct net_iov_area *area);
 #else
 static inline void page_pool_destroy(struct page_pool *pool)
 {
@@ -286,6 +291,18 @@  static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 					   int count)
 {
 }
+
+static inline int page_pool_init_paged_area(struct page_pool *pool,
+					    struct net_iov_area *area,
+					    struct page **pages)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void page_pool_release_area(struct page_pool *pool,
+					  struct net_iov_area *area)
+{
+}
 #endif
 
 void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 9a675e16e6a4..112b6fe4b7ff 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -459,7 +459,8 @@  page_pool_dma_sync_for_device(const struct page_pool *pool,
 		__page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
 }
 
-static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
+static bool page_pool_dma_map_page(struct page_pool *pool, netmem_ref netmem,
+				   struct page *page)
 {
 	dma_addr_t dma;
 
@@ -468,7 +469,7 @@  static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
 	 * into page private data (i.e 32bit cpu with 64bit DMA caps)
 	 * This mapping is kept for lifetime of page, until leaving pool.
 	 */
-	dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0,
+	dma = dma_map_page_attrs(pool->p.dev, page, 0,
 				 (PAGE_SIZE << pool->p.order), pool->p.dma_dir,
 				 DMA_ATTR_SKIP_CPU_SYNC |
 					 DMA_ATTR_WEAK_ORDERING);
@@ -490,6 +491,11 @@  static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
 	return false;
 }
 
+static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
+{
+	return page_pool_dma_map_page(pool, netmem, netmem_to_page(netmem));
+}
+
 static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
 						 gfp_t gfp)
 {
@@ -1154,3 +1160,54 @@  void page_pool_update_nid(struct page_pool *pool, int new_nid)
 	}
 }
 EXPORT_SYMBOL(page_pool_update_nid);
+
+static void page_pool_release_page_dma(struct page_pool *pool,
+				       netmem_ref netmem)
+{
+	__page_pool_release_page_dma(pool, netmem);
+}
+
+int page_pool_init_paged_area(struct page_pool *pool,
+			      struct net_iov_area *area, struct page **pages)
+{
+	struct net_iov *niov;
+	netmem_ref netmem;
+	int i, ret = 0;
+
+	if (!pool->dma_map)
+		return -EOPNOTSUPP;
+
+	for (i = 0; i < area->num_niovs; i++) {
+		niov = &area->niovs[i];
+		netmem = net_iov_to_netmem(niov);
+
+		page_pool_set_pp_info(pool, netmem);
+		if (!page_pool_dma_map_page(pool, netmem, pages[i])) {
+			ret = -EINVAL;
+			goto err_unmap_dma;
+		}
+	}
+	return 0;
+
+err_unmap_dma:
+	while (i--) {
+		netmem = net_iov_to_netmem(&area->niovs[i]);
+		page_pool_release_page_dma(pool, netmem);
+	}
+	return ret;
+}
+
+void page_pool_release_area(struct page_pool *pool,
+			    struct net_iov_area *area)
+{
+	int i;
+
+	if (!pool->dma_map)
+		return;
+
+	for (i = 0; i < area->num_niovs; i++) {
+		struct net_iov *niov = &area->niovs[i];
+
+		page_pool_release_page_dma(pool, net_iov_to_netmem(niov));
+	}
+}