@@ -87,7 +87,7 @@ struct page_pool_stats {
u64 empty; /* failed refills due to empty ptr ring, forcing
* slow path allocation
*/
-
+ u64 refill; /* allocations via successful refill */
} alloc;
};
@@ -222,6 +222,10 @@ static inline u64 page_pool_stats_get_empty(struct page_pool *pool)
return pool->ps.alloc.empty;
}
+static inline u64 page_pool_stats_get_refill(struct page_pool *pool)
+{
+ return pool->ps.alloc.refill;
+}
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
@@ -261,6 +265,11 @@ static inline u64 page_pool_stats_get_empty(struct page_pool *pool)
{
return 0;
}
+
+static inline u64 page_pool_stats_get_refill(struct page_pool *pool)
+{
+ return 0;
+}
#endif
void page_pool_put_page(struct page_pool *pool, struct page *page,
@@ -171,6 +171,8 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
pool->ps.alloc.fast++;
} else {
page = page_pool_refill_alloc_cache(pool);
+ if (likely(page))
+ pool->ps.alloc.refill++;
}
return page;
Add a stat tracking successful allocations that triggered a refill. A static inline wrapper is exposed for accessing this stat. Signed-off-by: Joe Damato <jdamato@fastly.com> --- include/net/page_pool.h | 11 ++++++++++- net/core/page_pool.c | 2 ++ 2 files changed, 12 insertions(+), 1 deletion(-)