@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
#ifndef _NET_PAGE_POOL_MEMORY_PROVIDER_H
#define _NET_PAGE_POOL_MEMORY_PROVIDER_H
@@ -7,4 +9,6 @@ int page_pool_mp_init_paged_area(struct page_pool *pool,
void page_pool_mp_release_area(struct page_pool *pool,
struct net_iov_area *area);
+void page_pool_mp_return_in_cache(struct page_pool *pool, netmem_ref netmem);
+
#endif
@@ -1213,3 +1213,25 @@ void page_pool_mp_release_area(struct page_pool *pool,
page_pool_release_page_dma(pool, net_iov_to_netmem(niov));
}
}
+
+/**
+ * page_pool_mp_return_in_cache() - return a netmem to the allocation cache.
+ * @pool: pool from which pages were allocated
+ * @netmem: netmem to return
+ *
+ * Return an already allocated and accounted netmem to the page pool's
+ * allocation cache. The function doesn't provide synchronisation and must
+ * only be called from NAPI context.
+ */
+void page_pool_mp_return_in_cache(struct page_pool *pool, netmem_ref netmem)
+{
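+ /* Never fill the cache past the refill watermark. */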
+ if (WARN_ON_ONCE(pool->alloc.count >= PP_ALLOC_CACHE_REFILL))
+ return;
+
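+ /* -1 means "sync the whole buffer", clamped internally to pool->p.max_len. */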
+ page_pool_dma_sync_for_device(pool, netmem, -1);
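+ /* Set pp_ref_count to 1: the netmem goes out holding a single reference. */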
+ page_pool_fragment_netmem(netmem, 1);
+ pool->alloc.cache[pool->alloc.count++] = netmem;
+}
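
For illustration, a minimal sketch of how a memory provider might drive the
new helper from its buffer-return path. The provider state and functions
below (my_mp_area, my_mp_refill, the freelist) are hypothetical and not part
of this patch; only page_pool_mp_return_in_cache(), net_iov_to_netmem(),
struct net_iov_area and PP_ALLOC_CACHE_REFILL come from this series and the
existing page pool code. Since the helper is unsynchronised, the caller is
assumed to run in NAPI context.

#include <net/netmem.h>
#include <net/page_pool/types.h>
#include <net/page_pool/memory_provider.h>

/* Hypothetical provider state: a simple stack of free net_iovs. */
struct my_mp_area {
	struct net_iov_area area;
	struct net_iov **freelist;
	unsigned int free_count;
};

/*
 * Push returned buffers straight into the pool's allocation cache.
 * Checking alloc.count against PP_ALLOC_CACHE_REFILL up front keeps the
 * WARN_ON_ONCE() inside page_pool_mp_return_in_cache() from firing.
 */
static void my_mp_refill(struct page_pool *pool, struct my_mp_area *mp)
{
	while (mp->free_count && pool->alloc.count < PP_ALLOC_CACHE_REFILL) {
		struct net_iov *niov = mp->freelist[--mp->free_count];

		page_pool_mp_return_in_cache(pool, net_iov_to_netmem(niov));
	}
}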