page_pool: Add recycle stats

Add per-cpu stats tracking page pool recycling events:
	- cached: recycling placed page in the page pool cache
	- cache_full: page pool cache was full
	- ring: page placed into the ptr ring
	- ring_full: page released from page pool because the ptr ring was full
	- released_refcnt: page released (and not recycled) because refcnt > 1

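For reporting, these per-cpu counters have to be summed over all CPUs. A minimal sketch of such an aggregation helper (the helper name and its existence are assumptions for illustration, not part of this patch):

	/* Hypothetical helper, not part of this patch: folds the per-cpu
	 * recycle counters into a single total for reporting.
	 */
	static void page_pool_sum_recycle_stats(struct page_pool *pool,
						struct page_pool_recycle_stats *total)
	{
		int cpu;

		memset(total, 0, sizeof(*total));
		for_each_possible_cpu(cpu) {
			const struct page_pool_recycle_stats *s =
				per_cpu_ptr(pool->recycle_stats, cpu);

			total->cached += s->cached;
			total->cache_full += s->cache_full;
			total->ring += s->ring;
			total->ring_full += s->ring_full;
			total->released_refcnt += s->released_refcnt;
		}
	}
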
Signed-off-by: Joe Damato <jdamato@fastly.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: hongrongxuan <hongrongxuan@huawei.com>
Joe Damato, 2022-03-01 23:55:48 -08:00 (committed by Jianping Liu)
commit 6c05b6c3b5, parent 0c7d0cb59b
2 changed files with 44 additions and 2 deletions

include/net/page_pool.h

@@ -93,6 +93,18 @@ struct page_pool_alloc_stats {
 	u64 refill; /* allocations via successful refill */
 	u64 waive;  /* failed refills due to numa zone mismatch */
 };
+
+struct page_pool_recycle_stats {
+	u64 cached;	/* recycling placed page in the cache. */
+	u64 cache_full; /* cache was full */
+	u64 ring;	/* recycling placed page back into ptr ring */
+	u64 ring_full;	/* page was released from page-pool because
+			 * PTR ring was full.
+			 */
+	u64 released_refcnt; /* page released because of elevated
+			      * refcnt
+			      */
+};
 #endif
 
 struct page_pool {
@@ -141,6 +153,10 @@ struct page_pool {
 	 */
 	struct ptr_ring ring;
 
+#ifdef CONFIG_PAGE_POOL_STATS
+	/* recycle stats are per-cpu to avoid locking */
+	struct page_pool_recycle_stats __percpu *recycle_stats;
+#endif
 	atomic_t pages_state_release_cnt;
 
 	/* A page_pool is strictly tied to a single RX-queue being
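The __percpu annotation above means every possible CPU gets its own instance of the struct, so increments never contend and need no lock or atomic. A standalone sketch of that pattern (the demo function is hypothetical, not from this patch):

	/* Hypothetical demo of the per-cpu counter pattern. */
	static int recycle_stats_percpu_demo(void)
	{
		struct page_pool_recycle_stats __percpu *stats;

		stats = alloc_percpu(struct page_pool_recycle_stats); /* zeroed copies, one per CPU */
		if (!stats)
			return -ENOMEM;

		get_cpu_ptr(stats)->cached++;	/* bump this CPU's private copy... */
		put_cpu_ptr(stats);		/* ...then re-enable preemption */

		free_percpu(stats);
		return 0;
	}
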

net/core/page_pool.c

@@ -29,8 +29,15 @@
 #ifdef CONFIG_PAGE_POOL_STATS
 /* alloc_stat_inc is intended to be used in softirq context */
 #define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
+/* recycle_stat_inc is safe to use when preemption is possible. */
+#define recycle_stat_inc(pool, __stat)						\
+	do {									\
+		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
+		this_cpu_inc(s->__stat);					\
+	} while (0)
 #else
 #define alloc_stat_inc(pool, __stat)
+#define recycle_stat_inc(pool, __stat)
 #endif
 
 static int page_pool_init(struct page_pool *pool,
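For a single counter, the new macro boils down to the open-coded form below (an illustrative expansion, not code from the patch); this_cpu_inc() performs a preemption- and IRQ-safe increment of the local CPU's copy, which is why the comment says it is safe even where preemption is possible:

	/* recycle_stat_inc(pool, ring) is roughly: */
	this_cpu_inc(pool->recycle_stats->ring);
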
@@ -78,6 +85,12 @@ static int page_pool_init(struct page_pool *pool,
 	    pool->p.flags & PP_FLAG_PAGE_FRAG)
 		return -EINVAL;
 
+#ifdef CONFIG_PAGE_POOL_STATS
+	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
+	if (!pool->recycle_stats)
+		return -ENOMEM;
+#endif
+
 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
 		return -ENOMEM;
 
@@ -408,7 +421,12 @@ static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
 	else
 		ret = ptr_ring_produce_bh(&pool->ring, page);
 
-	return (ret == 0) ? true : false;
+	if (!ret) {
+		recycle_stat_inc(pool, ring);
+		return true;
+	}
+
+	return false;
 }
 
 /* Only allow direct recycling in special circumstances, into the
@@ -419,11 +437,14 @@ static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
 static bool page_pool_recycle_in_cache(struct page *page,
 				       struct page_pool *pool)
 {
-	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
+	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
+		recycle_stat_inc(pool, cache_full);
 		return false;
+	}
 
 	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
 	pool->alloc.cache[pool->alloc.count++] = page;
+	recycle_stat_inc(pool, cached);
 	return true;
 }
@@ -478,6 +499,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
 	 * doing refcnt based recycle tricks, meaning another process
 	 * will be invoking put_page.
 	 */
+	recycle_stat_inc(pool, released_refcnt);
 	/* Do not replace this with page_pool_return_page() */
 	page_pool_release_page(pool, page);
 	put_page(page);
@@ -491,6 +513,7 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
 	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
 	if (page && !page_pool_recycle_in_ring(pool, page)) {
 		/* Cache full, fallback to free pages */
+		recycle_stat_inc(pool, ring_full);
 		page_pool_return_page(pool, page);
 	}
 }
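To see where the counters fire from a driver's point of view, a hedged sketch of a typical recycle call (the rxq variable and its page_pool member are assumptions):

	/* Hypothetical RX-completion snippet. With allow_direct=true a
	 * softirq caller may hit the lockless cache ('cached', or
	 * 'cache_full' and then the ring); otherwise the page goes to
	 * the ptr ring ('ring'/'ring_full'). A page with an elevated
	 * refcount is released instead ('released_refcnt').
	 */
	page_pool_put_full_page(rxq->page_pool, page, true);
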
@@ -637,6 +660,9 @@ static void page_pool_free(struct page_pool *pool)
 	if (pool->p.flags & PP_FLAG_DMA_MAP)
 		put_device(pool->p.dev);
 
+#ifdef CONFIG_PAGE_POOL_STATS
+	free_percpu(pool->recycle_stats);
+#endif
 	kfree(pool);
 }