page_pool: keep pp info as long as page pool owns the page

Currently, page->pp is cleared and set every time the page
is recycled, which is unnecessary.

So only set the page->pp when the page is added to the page
pool and only clear it when the page is released from the
page pool.

This is also a preparation to support allocating frag page
in page pool.

Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: hongrongxuan <hongrongxuan@huawei.com>
This commit is contained in:
Yunsheng Lin 2021-08-06 10:46:19 +08:00 committed by Jianping Liu
parent cf632b0d15
commit 8c39e5e810
3 changed files with 18 additions and 14 deletions

View File

@ -4618,11 +4618,9 @@ static inline bool skb_csum_is_sctp(struct sk_buff *skb)
}
#ifdef CONFIG_PAGE_POOL
static inline void skb_mark_for_recycle(struct sk_buff *skb, struct page *page,
struct page_pool *pp)
static inline void skb_mark_for_recycle(struct sk_buff *skb)
{
skb->pp_recycle = 1;
page_pool_store_mem_info(page, pp);
}
#endif

View File

@ -251,11 +251,4 @@ static inline void page_pool_ring_unlock(struct page_pool *pool)
spin_unlock_bh(&pool->ring.producer_lock);
}
/* Store mem_info on struct page and use it while recycling skb frags */
/* NOTE(review): this helper is being REMOVED by this commit — the
 * page->pp back-pointer is now written once in page_pool_set_pp_info()
 * when the page enters the pool, instead of on every recycle. */
static inline
void page_pool_store_mem_info(struct page *page, struct page_pool *pp)
{
page->pp = pp;
}
#endif /* _NET_PAGE_POOL_H */

View File

@ -202,6 +202,19 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
return true;
}
/* Attach pool-ownership info to @page when it is added to @pool:
 * record the owning pool in page->pp and OR PP_SIGNATURE into
 * page->pp_magic so recycling paths can recognize pool-owned pages.
 * Paired with page_pool_clear_pp_info() on release. */
static void page_pool_set_pp_info(struct page_pool *pool,
struct page *page)
{
page->pp = pool;
page->pp_magic |= PP_SIGNATURE;
}
/* Inverse of page_pool_set_pp_info(): wipe the signature and drop the
 * pool back-pointer once the page is released from the page pool. */
static void page_pool_clear_pp_info(struct page *page)
{
page->pp_magic = 0;
page->pp = NULL;
}
static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
gfp_t gfp)
{
@ -218,7 +231,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
return NULL;
}
page->pp_magic |= PP_SIGNATURE;
page_pool_set_pp_info(pool, page);
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
@ -262,7 +275,8 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
put_page(page);
continue;
}
page->pp_magic |= PP_SIGNATURE;
page_pool_set_pp_info(pool, page);
pool->alloc.cache[pool->alloc.count++] = page;
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
@ -341,7 +355,7 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
DMA_ATTR_SKIP_CPU_SYNC);
page_pool_set_dma_addr(page, 0);
skip_dma_unmap:
page->pp_magic = 0;
page_pool_clear_pp_info(page);
/* This may be the last page returned, releasing the pool, so
* it is not safe to reference pool afterwards.
@ -639,7 +653,6 @@ bool page_pool_return_skb_page(struct page *page)
* The page will be returned to the pool here regardless of the
* 'flipped' fragment being in use or not.
*/
page->pp = NULL;
page_pool_put_full_page(pp, page, false);
return true;