[v2,08/24] page_pool: Convert pp_alloc_cache to contain netmem

Message ID 20230105214631.3939268-9-willy@infradead.org
State Superseded
Delegated to: Netdev Maintainers
Series Split netmem from struct page

Checks

Context Check Description
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ${{ matrix.test }} on ${{ matrix.arch }} with ${{ matrix.toolchain }}
bpf/vmtest-bpf-next-VM_Test-2 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-5 fail Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-8 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-9 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-14 success Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_progs on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-16 success Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-17 fail Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 fail Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-19 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32 on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-21 success Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-22 fail Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 fail Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_progs_no_alu32_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_progs_no_alu32_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-29 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for test_progs_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-31 success Logs for test_progs_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-32 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-34 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-35 success Logs for test_verifier on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-36 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-37 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-38 success Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-PR fail PR summary
netdev/tree_selection success Guessed tree name to be net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix warning Target tree name not specified in the subject
netdev/cover_letter success Series has a cover letter
netdev/patch_count fail Series longer than 15 patches (and no cover letter)
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 5340 this patch: 5340
netdev/cc_maintainers warning 4 maintainers not CCed: edumazet@google.com davem@davemloft.net pabeni@redhat.com kuba@kernel.org
netdev/build_clang success Errors and warnings before: 1086 this patch: 1086
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 5554 this patch: 5554
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 164 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Matthew Wilcox (Oracle) Jan. 5, 2023, 9:46 p.m. UTC
Change the type here from page to netmem.  It works out well to
convert page_pool_refill_alloc_cache() to return a netmem instead
of a page as part of this commit.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/net/page_pool.h |  2 +-
 net/core/page_pool.c    | 52 ++++++++++++++++++++---------------------
 2 files changed, 27 insertions(+), 27 deletions(-)
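
For context: this patch relies on the netmem <-> page conversion helpers
added earlier in the series. A minimal sketch of the shape those helpers
are assumed to take (simplified here; the real definitions are introduced
in the first patches of the series):

/* netmem is a typed overlay of struct page, so conversions are casts. */
static inline struct netmem *page_netmem(struct page *page)
{
	return (struct netmem *)page;
}

static inline struct page *netmem_page(struct netmem *nmem)
{
	return (struct page *)nmem;
}

/* NUMA node of the memory backing this netmem. */
static inline int netmem_nid(const struct netmem *nmem)
{
	return page_to_nid(netmem_page(nmem));
}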

Comments

Jesper Dangaard Brouer Jan. 6, 2023, 2:18 p.m. UTC | #1
On 05/01/2023 22.46, Matthew Wilcox (Oracle) wrote:
> Change the type here from page to netmem.  It works out well to
> convert page_pool_refill_alloc_cache() to return a netmem instead
> of a page as part of this commit.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>   include/net/page_pool.h |  2 +-
>   net/core/page_pool.c    | 52 ++++++++++++++++++++---------------------
>   2 files changed, 27 insertions(+), 27 deletions(-)

Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>

Ilias Apalodimas Jan. 10, 2023, 9:58 a.m. UTC | #2
On Thu, Jan 05, 2023 at 09:46:15PM +0000, Matthew Wilcox (Oracle) wrote:
> Change the type here from page to netmem.  It works out well to
> convert page_pool_refill_alloc_cache() to return a netmem instead
> of a page as part of this commit.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  include/net/page_pool.h |  2 +-
>  net/core/page_pool.c    | 52 ++++++++++++++++++++---------------------
>  2 files changed, 27 insertions(+), 27 deletions(-)
>
> [...]

Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
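
One subtlety in the patch below: __page_pool_alloc_pages_slow() hands the
alloc cache to alloc_pages_bulk_array_node() through a (struct page **)
cast. That is only sound because netmem pointers and page pointers are
interchangeable, which the series guarantees by making struct netmem
overlay struct page. A sketch of the invariant (illustrative only; the
series enforces this with compile-time assertions on individual fields):

/* Illustrative compile-time checks for the overlay the cast relies on. */
static_assert(sizeof(struct netmem) == sizeof(struct page),
	      "struct netmem must overlay struct page exactly");
static_assert(offsetof(struct netmem, flags) == offsetof(struct page, flags),
	      "corresponding fields must sit at the same offsets");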

Patch

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 480baa22bc50..63aa530922de 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -173,7 +173,7 @@ static inline bool netmem_is_pfmemalloc(const struct netmem *nmem)
 #define PP_ALLOC_CACHE_REFILL	64
 struct pp_alloc_cache {
 	u32 count;
-	struct page *cache[PP_ALLOC_CACHE_SIZE];
+	struct netmem *cache[PP_ALLOC_CACHE_SIZE];
 };
 
 struct page_pool_params {
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 8f3f7cc5a2d5..c54217ce6b77 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -229,10 +229,10 @@ void page_pool_return_page(struct page_pool *pool, struct page *page)
 }
 
 noinline
-static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
+static struct netmem *page_pool_refill_alloc_cache(struct page_pool *pool)
 {
 	struct ptr_ring *r = &pool->ring;
-	struct page *page;
+	struct netmem *nmem;
 	int pref_nid; /* preferred NUMA node */
 
 	/* Quicker fallback, avoid locks when ring is empty */
@@ -253,49 +253,49 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
 
 	/* Refill alloc array, but only if NUMA match */
 	do {
-		page = __ptr_ring_consume(r);
-		if (unlikely(!page))
+		nmem = __ptr_ring_consume(r);
+		if (unlikely(!nmem))
 			break;
 
-		if (likely(page_to_nid(page) == pref_nid)) {
-			pool->alloc.cache[pool->alloc.count++] = page;
+		if (likely(netmem_nid(nmem) == pref_nid)) {
+			pool->alloc.cache[pool->alloc.count++] = nmem;
 		} else {
 			/* NUMA mismatch;
 			 * (1) release 1 page to page-allocator and
 			 * (2) break out to fallthrough to alloc_pages_node.
 			 * This limit stress on page buddy alloactor.
 			 */
-			page_pool_return_page(pool, page);
+			page_pool_return_netmem(pool, nmem);
 			alloc_stat_inc(pool, waive);
-			page = NULL;
+			nmem = NULL;
 			break;
 		}
 	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
 
 	/* Return last page */
 	if (likely(pool->alloc.count > 0)) {
-		page = pool->alloc.cache[--pool->alloc.count];
+		nmem = pool->alloc.cache[--pool->alloc.count];
 		alloc_stat_inc(pool, refill);
 	}
 
-	return page;
+	return nmem;
 }
 
 /* fast path */
 static struct page *__page_pool_get_cached(struct page_pool *pool)
 {
-	struct page *page;
+	struct netmem *nmem;
 
 	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
 	if (likely(pool->alloc.count)) {
 		/* Fast-path */
-		page = pool->alloc.cache[--pool->alloc.count];
+		nmem = pool->alloc.cache[--pool->alloc.count];
 		alloc_stat_inc(pool, fast);
 	} else {
-		page = page_pool_refill_alloc_cache(pool);
+		nmem = page_pool_refill_alloc_cache(pool);
 	}
 
-	return page;
+	return netmem_page(nmem);
 }
 
 static void page_pool_dma_sync_for_device(struct page_pool *pool,
@@ -391,13 +391,13 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 
 	/* Unnecessary as alloc cache is empty, but guarantees zero count */
 	if (unlikely(pool->alloc.count > 0))
-		return pool->alloc.cache[--pool->alloc.count];
+		return netmem_page(pool->alloc.cache[--pool->alloc.count]);
 
 	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
 	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
 
 	nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
-					       pool->alloc.cache);
+					(struct page **)pool->alloc.cache);
 	if (unlikely(!nr_pages))
 		return NULL;
 
@@ -405,7 +405,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 	 * page element have not been (possibly) DMA mapped.
 	 */
 	for (i = 0; i < nr_pages; i++) {
-		struct netmem *nmem = page_netmem(pool->alloc.cache[i]);
+		struct netmem *nmem = pool->alloc.cache[i];
 		if ((pp_flags & PP_FLAG_DMA_MAP) &&
 		    unlikely(!page_pool_dma_map(pool, nmem))) {
 			netmem_put(nmem);
@@ -413,7 +413,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 		}
 
 		page_pool_set_pp_info(pool, nmem);
-		pool->alloc.cache[pool->alloc.count++] = netmem_page(nmem);
+		pool->alloc.cache[pool->alloc.count++] = nmem;
 		/* Track how many pages are held 'in-flight' */
 		pool->pages_state_hold_cnt++;
 		trace_page_pool_state_hold(pool, nmem,
@@ -422,7 +422,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 
 	/* Return last page */
 	if (likely(pool->alloc.count > 0)) {
-		page = pool->alloc.cache[--pool->alloc.count];
+		page = netmem_page(pool->alloc.cache[--pool->alloc.count]);
 		alloc_stat_inc(pool, slow);
 	} else {
 		page = NULL;
@@ -547,7 +547,7 @@ static bool page_pool_recycle_in_cache(struct page *page,
 	}
 
 	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
-	pool->alloc.cache[pool->alloc.count++] = page;
+	pool->alloc.cache[pool->alloc.count++] = page_netmem(page);
 	recycle_stat_inc(pool, cached);
 	return true;
 }
@@ -785,7 +785,7 @@ static void page_pool_free(struct page_pool *pool)
 
 static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
 {
-	struct page *page;
+	struct netmem *nmem;
 
 	if (pool->destroy_cnt)
 		return;
@@ -795,8 +795,8 @@ static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
 	 * call concurrently.
 	 */
 	while (pool->alloc.count) {
-		page = pool->alloc.cache[--pool->alloc.count];
-		page_pool_return_page(pool, page);
+		nmem = pool->alloc.cache[--pool->alloc.count];
+		page_pool_return_netmem(pool, nmem);
 	}
 }
 
@@ -878,15 +878,15 @@ EXPORT_SYMBOL(page_pool_destroy);
 /* Caller must provide appropriate safe context, e.g. NAPI. */
 void page_pool_update_nid(struct page_pool *pool, int new_nid)
 {
-	struct page *page;
+	struct netmem *nmem;
 
 	trace_page_pool_update_nid(pool, new_nid);
 	pool->p.nid = new_nid;
 
 	/* Flush pool alloc cache, as refill will check NUMA node */
 	while (pool->alloc.count) {
-		page = pool->alloc.cache[--pool->alloc.count];
-		page_pool_return_page(pool, page);
+		nmem = pool->alloc.cache[--pool->alloc.count];
+		page_pool_return_netmem(pool, nmem);
 	}
 }
 EXPORT_SYMBOL(page_pool_update_nid);
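
Nothing driver-facing changes here: the alloc cache stores netmem
internally, but __page_pool_get_cached() converts back with netmem_page()
before returning, so existing callers keep working unmodified. A
hypothetical caller, unaffected by this conversion:

/* Hypothetical driver RX refill path; unchanged by this patch. */
struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);

if (page) {
	dma_addr_t dma = page_pool_get_dma_addr(page);
	/* ... post page + dma to the device's RX descriptor ring ... */
}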