
[v3,08/26] page_pool: Convert pp_alloc_cache to contain netmem

Message ID 20230111042214.907030-9-willy@infradead.org (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series Split netmem from struct page

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-7 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-8 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-34 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-14 success Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_progs on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-19 fail Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32 on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-22 success Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_progs_no_alu32_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_progs_no_alu32_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-29 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for test_progs_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-32 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-35 success Logs for test_verifier on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-36 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-37 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-38 success Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-16 success Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for test_progs_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_maps on s390x with gcc
netdev/tree_selection success Guessed tree name to be net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix warning Target tree name not specified in the subject
netdev/cover_letter success Series has a cover letter
netdev/patch_count fail Series longer than 15 patches (and no cover letter)
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 5339 this patch: 5339
netdev/cc_maintainers warning 4 maintainers not CCed: edumazet@google.com davem@davemloft.net pabeni@redhat.com kuba@kernel.org
netdev/build_clang success Errors and warnings before: 1085 this patch: 1085
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 5553 this patch: 5553
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 164 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Matthew Wilcox (Oracle) Jan. 11, 2023, 4:21 a.m. UTC
Change the type here from page to netmem.  It works out well to
convert page_pool_refill_alloc_cache() to return a netmem instead
of a page as part of this commit.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
---
 include/net/page_pool.h |  2 +-
 net/core/page_pool.c    | 52 ++++++++++++++++++++---------------------
 2 files changed, 27 insertions(+), 27 deletions(-)
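
The hunks below lean on the netmem helpers introduced earlier in this series (netmem_page(), page_netmem(), netmem_nid(), netmem_put(), page_pool_return_netmem()). As a rough illustrative sketch only, and assuming, as the earlier patches establish, that struct netmem is laid out as an overlay of struct page, the conversion helpers amount to little more than typed casts:

	/* Illustrative sketch; the real helpers live in the earlier netmem
	 * patches of this series.  netmem_nid() is assumed to be a thin
	 * wrapper around page_to_nid().
	 */
	static inline struct page *netmem_page(struct netmem *nmem)
	{
		return (struct page *)nmem;
	}

	static inline struct netmem *page_netmem(struct page *page)
	{
		return (struct netmem *)page;
	}

	static inline int netmem_nid(const struct netmem *nmem)
	{
		return page_to_nid((const struct page *)nmem);
	}

Under that assumption, casting pool->alloc.cache to (struct page **) for alloc_pages_bulk_array_node(), as one hunk below does, reinterprets the same array of pointers rather than copying it, which appears to be why the alloc cache can be handed straight to the bulk page allocator.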

Comments

Shay Agroskin Jan. 14, 2023, 12:28 p.m. UTC | #1
"Matthew Wilcox (Oracle)" <willy@infradead.org> writes:

> Change the type here from page to netmem.  It works out well to
> convert page_pool_refill_alloc_cache() to return a netmem instead
> of a page as part of this commit.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
> Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
> Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
> ---
>  include/net/page_pool.h |  2 +-
>  net/core/page_pool.c    | 52 ++++++++++++++++++++---------------------
>  2 files changed, 27 insertions(+), 27 deletions(-)
>
> diff --git a/include/net/page_pool.h b/include/net/page_pool.h
> index 34d47c10550e..583c13f6f2ab 100644
> --- a/include/net/page_pool.h
> +++ b/include/net/page_pool.h
> @@ -173,7 +173,7 @@ static inline bool netmem_is_pfmemalloc(const struct netmem *nmem)
>  #define PP_ALLOC_CACHE_REFILL	64
>  struct pp_alloc_cache {
>  	u32 count;
> -	struct page *cache[PP_ALLOC_CACHE_SIZE];
> +	struct netmem *cache[PP_ALLOC_CACHE_SIZE];
>  };
>  
>  struct page_pool_params {
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index 8f3f7cc5a2d5..c54217ce6b77 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -229,10 +229,10 @@ void page_pool_return_page(struct page_pool *pool, struct page *page)
>  }
>  
>  noinline
> -static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
> +static struct netmem *page_pool_refill_alloc_cache(struct page_pool *pool)
>  {
>  	struct ptr_ring *r = &pool->ring;
> -	struct page *page;
> +	struct netmem *nmem;
>  	int pref_nid; /* preferred NUMA node */
>  
>  	/* Quicker fallback, avoid locks when ring is empty */
> @@ -253,49 +253,49 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
>  
>  	/* Refill alloc array, but only if NUMA match */
>  	do {
> -		page = __ptr_ring_consume(r);
> -		if (unlikely(!page))
> +		nmem = __ptr_ring_consume(r);
> +		if (unlikely(!nmem))
>  			break;
>  
> -		if (likely(page_to_nid(page) == pref_nid)) {
> -			pool->alloc.cache[pool->alloc.count++] = page;
> +		if (likely(netmem_nid(nmem) == pref_nid)) {
> +			pool->alloc.cache[pool->alloc.count++] = nmem;
>  		} else {
>  			/* NUMA mismatch;
>  			 * (1) release 1 page to page-allocator and
>  			 * (2) break out to fallthrough to alloc_pages_node.
>  			 * This limit stress on page buddy alloactor.
>  			 */
> -			page_pool_return_page(pool, page);
> +			page_pool_return_netmem(pool, nmem);
>  			alloc_stat_inc(pool, waive);
> -			page = NULL;
> +			nmem = NULL;
>  			break;
>  		}
>  	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
>  
>  	/* Return last page */
>  	if (likely(pool->alloc.count > 0)) {
> -		page = pool->alloc.cache[--pool->alloc.count];
> +		nmem = pool->alloc.cache[--pool->alloc.count];
>  		alloc_stat_inc(pool, refill);
>  	}
>  
> -	return page;
> +	return nmem;
>  }
>  
>  /* fast path */
>  static struct page *__page_pool_get_cached(struct page_pool *pool)
>  {
> -	struct page *page;
> +	struct netmem *nmem;
>  
>  	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
>  	if (likely(pool->alloc.count)) {
>  		/* Fast-path */
> -		page = pool->alloc.cache[--pool->alloc.count];
> +		nmem = pool->alloc.cache[--pool->alloc.count];
>  		alloc_stat_inc(pool, fast);
>  	} else {
> -		page = page_pool_refill_alloc_cache(pool);
> +		nmem = page_pool_refill_alloc_cache(pool);
>  	}
>  
> -	return page;
> +	return netmem_page(nmem);
>  }
>  
>  static void page_pool_dma_sync_for_device(struct page_pool *pool,
> @@ -391,13 +391,13 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
>  
>  	/* Unnecessary as alloc cache is empty, but guarantees zero count */
>  	if (unlikely(pool->alloc.count > 0))
> -		return pool->alloc.cache[--pool->alloc.count];
> +		return netmem_page(pool->alloc.cache[--pool->alloc.count]);
>  
>  	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
>  	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
>  
>  	nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
> -					       pool->alloc.cache);
> +					(struct page **)pool->alloc.cache);

Can you fix the alignment here (so that the '(struct page **)' would
align with the 'gfp' argument one line above)?

Shay

>  	if (unlikely(!nr_pages))
>  		return NULL;
>  
> @@ -405,7 +405,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
>  	 * page element have not been (possibly) DMA mapped.
>  	 */
>  	for (i = 0; i < nr_pages; i++) {
> -		struct netmem *nmem = page_netmem(pool->alloc.cache[i]);
> +		struct netmem *nmem = pool->alloc.cache[i];
>  		if ((pp_flags & PP_FLAG_DMA_MAP) &&
>  		    unlikely(!page_pool_dma_map(pool, nmem))) {
>  			netmem_put(nmem);
> @@ -413,7 +413,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
>  		}
>  
>  		page_pool_set_pp_info(pool, nmem);
> -		pool->alloc.cache[pool->alloc.count++] = netmem_page(nmem);
> +		pool->alloc.cache[pool->alloc.count++] = nmem;
>  		/* Track how many pages are held 'in-flight' */
>  		pool->pages_state_hold_cnt++;
>  		trace_page_pool_state_hold(pool, nmem,
> @@ -422,7 +422,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
>  
>  	/* Return last page */
>  	if (likely(pool->alloc.count > 0)) {
> -		page = pool->alloc.cache[--pool->alloc.count];
> +		page = netmem_page(pool->alloc.cache[--pool->alloc.count]);
>  		alloc_stat_inc(pool, slow);
>  	} else {
>  		page = NULL;
> @@ -547,7 +547,7 @@ static bool page_pool_recycle_in_cache(struct page *page,
>  	}
>  
>  	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
> -	pool->alloc.cache[pool->alloc.count++] = page;
> +	pool->alloc.cache[pool->alloc.count++] = page_netmem(page);
>  	recycle_stat_inc(pool, cached);
>  	return true;
>  }
> @@ -785,7 +785,7 @@ static void page_pool_free(struct page_pool *pool)
>  
>  static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
>  {
> -	struct page *page;
> +	struct netmem *nmem;
>  
>  	if (pool->destroy_cnt)
>  		return;
> @@ -795,8 +795,8 @@ static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
>  	 * call concurrently.
>  	 */
>  	while (pool->alloc.count) {
> -		page = pool->alloc.cache[--pool->alloc.count];
> -		page_pool_return_page(pool, page);
> +		nmem = pool->alloc.cache[--pool->alloc.count];
> +		page_pool_return_netmem(pool, nmem);
>  	}
>  }
>  
> @@ -878,15 +878,15 @@ EXPORT_SYMBOL(page_pool_destroy);
>  /* Caller must provide appropriate safe context, e.g. NAPI. */
>  void page_pool_update_nid(struct page_pool *pool, int new_nid)
>  {
> -	struct page *page;
> +	struct netmem *nmem;
>  
>  	trace_page_pool_update_nid(pool, new_nid);
>  	pool->p.nid = new_nid;
>  
>  	/* Flush pool alloc cache, as refill will check NUMA node */
>  	while (pool->alloc.count) {
> -		page = pool->alloc.cache[--pool->alloc.count];
> -		page_pool_return_page(pool, page);
> +		nmem = pool->alloc.cache[--pool->alloc.count];
> +		page_pool_return_netmem(pool, nmem);
>  	}
>  }
>  EXPORT_SYMBOL(page_pool_update_nid);
Matthew Wilcox (Oracle) Jan. 14, 2023, 5:58 p.m. UTC | #2
On Sat, Jan 14, 2023 at 02:28:50PM +0200, Shay Agroskin wrote:
> >  	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
> >    	nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid,  bulk,
> > -					       pool->alloc.cache);
> > +					(struct page **)pool->alloc.cache);
> 
> Can you fix the alignment here (so that the '(struct page **)' would
> align with the 'gfp' argument one line above)?

No, that makes the line too long.
Shay Agroskin Jan. 15, 2023, 11:03 a.m. UTC | #3
Matthew Wilcox <willy@infradead.org> writes:

>
> On Sat, Jan 14, 2023 at 02:28:50PM +0200, Shay Agroskin wrote:
>> >  	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
>> >  	nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
>> > -					       pool->alloc.cache);
>> > +					(struct page **)pool->alloc.cache);
>>
>> Can you fix the alignment here (so that the '(struct page **)' would
>> align with the 'gfp' argument one line above)?
>
> No, that makes the line too long.

Couldn't find any word about inlining in the coding style. AFAIK an
82-character line is considered ok (and the file you're editing has even
longer lines than that). To me it'd look better aligned, but suit yourself.
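
For reference, the two layouts under discussion look like this, assuming 8-column tabs and the single tab of body indentation used in __page_pool_alloc_pages_slow() (line lengths are approximate):

	/* As submitted: continuation pushed to extra tab stops; the second
	 * line ends around column 75.
	 */
	nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
					(struct page **)pool->alloc.cache);

	/* Aligned with the 'gfp' argument, as requested; the second line
	 * ends around column 82, just past the traditional 80-column
	 * preference, which is the objection raised above.
	 */
	nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
					       (struct page **)pool->alloc.cache);

The disagreement is purely stylistic: whether open-parenthesis alignment is worth crossing the 80-column mark.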

Patch

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 34d47c10550e..583c13f6f2ab 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -173,7 +173,7 @@  static inline bool netmem_is_pfmemalloc(const struct netmem *nmem)
 #define PP_ALLOC_CACHE_REFILL	64
 struct pp_alloc_cache {
 	u32 count;
-	struct page *cache[PP_ALLOC_CACHE_SIZE];
+	struct netmem *cache[PP_ALLOC_CACHE_SIZE];
 };
 
 struct page_pool_params {
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 8f3f7cc5a2d5..c54217ce6b77 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -229,10 +229,10 @@  void page_pool_return_page(struct page_pool *pool, struct page *page)
 }
 
 noinline
-static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
+static struct netmem *page_pool_refill_alloc_cache(struct page_pool *pool)
 {
 	struct ptr_ring *r = &pool->ring;
-	struct page *page;
+	struct netmem *nmem;
 	int pref_nid; /* preferred NUMA node */
 
 	/* Quicker fallback, avoid locks when ring is empty */
@@ -253,49 +253,49 @@  static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
 
 	/* Refill alloc array, but only if NUMA match */
 	do {
-		page = __ptr_ring_consume(r);
-		if (unlikely(!page))
+		nmem = __ptr_ring_consume(r);
+		if (unlikely(!nmem))
 			break;
 
-		if (likely(page_to_nid(page) == pref_nid)) {
-			pool->alloc.cache[pool->alloc.count++] = page;
+		if (likely(netmem_nid(nmem) == pref_nid)) {
+			pool->alloc.cache[pool->alloc.count++] = nmem;
 		} else {
 			/* NUMA mismatch;
 			 * (1) release 1 page to page-allocator and
 			 * (2) break out to fallthrough to alloc_pages_node.
 			 * This limit stress on page buddy alloactor.
 			 */
-			page_pool_return_page(pool, page);
+			page_pool_return_netmem(pool, nmem);
 			alloc_stat_inc(pool, waive);
-			page = NULL;
+			nmem = NULL;
 			break;
 		}
 	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
 
 	/* Return last page */
 	if (likely(pool->alloc.count > 0)) {
-		page = pool->alloc.cache[--pool->alloc.count];
+		nmem = pool->alloc.cache[--pool->alloc.count];
 		alloc_stat_inc(pool, refill);
 	}
 
-	return page;
+	return nmem;
 }
 
 /* fast path */
 static struct page *__page_pool_get_cached(struct page_pool *pool)
 {
-	struct page *page;
+	struct netmem *nmem;
 
 	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
 	if (likely(pool->alloc.count)) {
 		/* Fast-path */
-		page = pool->alloc.cache[--pool->alloc.count];
+		nmem = pool->alloc.cache[--pool->alloc.count];
 		alloc_stat_inc(pool, fast);
 	} else {
-		page = page_pool_refill_alloc_cache(pool);
+		nmem = page_pool_refill_alloc_cache(pool);
 	}
 
-	return page;
+	return netmem_page(nmem);
 }
 
 static void page_pool_dma_sync_for_device(struct page_pool *pool,
@@ -391,13 +391,13 @@  static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 
 	/* Unnecessary as alloc cache is empty, but guarantees zero count */
 	if (unlikely(pool->alloc.count > 0))
-		return pool->alloc.cache[--pool->alloc.count];
+		return netmem_page(pool->alloc.cache[--pool->alloc.count]);
 
 	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
 	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
 
 	nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
-					       pool->alloc.cache);
+					(struct page **)pool->alloc.cache);
 	if (unlikely(!nr_pages))
 		return NULL;
 
@@ -405,7 +405,7 @@  static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 	 * page element have not been (possibly) DMA mapped.
 	 */
 	for (i = 0; i < nr_pages; i++) {
-		struct netmem *nmem = page_netmem(pool->alloc.cache[i]);
+		struct netmem *nmem = pool->alloc.cache[i];
 		if ((pp_flags & PP_FLAG_DMA_MAP) &&
 		    unlikely(!page_pool_dma_map(pool, nmem))) {
 			netmem_put(nmem);
@@ -413,7 +413,7 @@  static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 		}
 
 		page_pool_set_pp_info(pool, nmem);
-		pool->alloc.cache[pool->alloc.count++] = netmem_page(nmem);
+		pool->alloc.cache[pool->alloc.count++] = nmem;
 		/* Track how many pages are held 'in-flight' */
 		pool->pages_state_hold_cnt++;
 		trace_page_pool_state_hold(pool, nmem,
@@ -422,7 +422,7 @@  static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 
 	/* Return last page */
 	if (likely(pool->alloc.count > 0)) {
-		page = pool->alloc.cache[--pool->alloc.count];
+		page = netmem_page(pool->alloc.cache[--pool->alloc.count]);
 		alloc_stat_inc(pool, slow);
 	} else {
 		page = NULL;
@@ -547,7 +547,7 @@  static bool page_pool_recycle_in_cache(struct page *page,
 	}
 
 	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
-	pool->alloc.cache[pool->alloc.count++] = page;
+	pool->alloc.cache[pool->alloc.count++] = page_netmem(page);
 	recycle_stat_inc(pool, cached);
 	return true;
 }
@@ -785,7 +785,7 @@  static void page_pool_free(struct page_pool *pool)
 
 static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
 {
-	struct page *page;
+	struct netmem *nmem;
 
 	if (pool->destroy_cnt)
 		return;
@@ -795,8 +795,8 @@  static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
 	 * call concurrently.
 	 */
 	while (pool->alloc.count) {
-		page = pool->alloc.cache[--pool->alloc.count];
-		page_pool_return_page(pool, page);
+		nmem = pool->alloc.cache[--pool->alloc.count];
+		page_pool_return_netmem(pool, nmem);
 	}
 }
 
@@ -878,15 +878,15 @@  EXPORT_SYMBOL(page_pool_destroy);
 /* Caller must provide appropriate safe context, e.g. NAPI. */
 void page_pool_update_nid(struct page_pool *pool, int new_nid)
 {
-	struct page *page;
+	struct netmem *nmem;
 
 	trace_page_pool_update_nid(pool, new_nid);
 	pool->p.nid = new_nid;
 
 	/* Flush pool alloc cache, as refill will check NUMA node */
 	while (pool->alloc.count) {
-		page = pool->alloc.cache[--pool->alloc.count];
-		page_pool_return_page(pool, page);
+		nmem = pool->alloc.cache[--pool->alloc.count];
+		page_pool_return_netmem(pool, nmem);
 	}
 }
 EXPORT_SYMBOL(page_pool_update_nid);