From b03f68ba26c85671c6734b14ea37a5955b0fb8d3 Mon Sep 17 00:00:00 2001
From: Jakub Kicinski
Date: Wed, 19 Jul 2023 18:04:06 -0700
Subject: [PATCH 1/4] eth: tsnep: let page recycling happen with skbs

tsnep builds an skb with napi_build_skb() and then calls
page_pool_release_page() for the page in which that skb's head sits.
Use recycling instead; recycling of heads works just fine.

Reviewed-by: Yunsheng Lin
Link: https://lore.kernel.org/r/20230720010409.1967072-2-kuba@kernel.org
Reviewed-by: Alexander Lobakin
Signed-off-by: Jakub Kicinski
---
 drivers/net/ethernet/engleder/tsnep_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 84751bb303a6..079f9f6ae21a 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -1333,7 +1333,7 @@ static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi,
 
 	skb = tsnep_build_skb(rx, page, length);
 	if (skb) {
-		page_pool_release_page(rx->page_pool, page);
+		skb_mark_for_recycle(skb);
 
 		rx->packets++;
 		rx->bytes += length;

From 98e2727c79d007d99a026eb66f481908e66af263 Mon Sep 17 00:00:00 2001
From: Jakub Kicinski
Date: Wed, 19 Jul 2023 18:04:07 -0700
Subject: [PATCH 2/4] eth: stmmac: let page recycling happen with skbs

stmmac removes pages from the page pool after attaching them to skbs.
Use page recycling instead. skb heads are always copied, and pages are
always from the page pool in this driver, so we can just as well mark
all allocated skbs for recycling.

Reviewed-by: Yunsheng Lin
Link: https://lore.kernel.org/r/20230720010409.1967072-3-kuba@kernel.org
Reviewed-by: Alexander Lobakin
Signed-off-by: Jakub Kicinski
---
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e7ca52f0d2f2..e1f1c034d325 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -5441,7 +5441,7 @@ read_again:
 					priv->dma_conf.dma_buf_sz);
 
 			/* Data payload appended into SKB */
-			page_pool_release_page(rx_q->page_pool, buf->page);
+			skb_mark_for_recycle(skb);
 			buf->page = NULL;
 		}
@@ -5453,7 +5453,7 @@ read_again:
 					priv->dma_conf.dma_buf_sz);
 
 			/* Data payload appended into SKB */
-			page_pool_release_page(rx_q->page_pool, buf->sec_page);
+			skb_mark_for_recycle(skb);
 			buf->sec_page = NULL;
 		}
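Taken together, the two driver patches above converge on the same pattern:
build the skb on top of the page_pool page, then mark the skb for recycling
instead of detaching the page. Below is a minimal sketch of that pattern,
not code from either driver — struct my_rx_ring, MY_SKB_HEADROOM and
my_rx_one() are made-up names; only the kernel helpers are real:

#include <linux/etherdevice.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>

#define MY_SKB_HEADROOM	NET_SKB_PAD	/* illustrative headroom */

struct my_rx_ring {			/* illustrative ring state */
	struct page_pool *pool;
	struct net_device *netdev;
};

/* Called from the driver's NAPI poll loop for one received buffer. */
static void my_rx_one(struct my_rx_ring *ring, struct napi_struct *napi,
		      struct page *page, unsigned int len)
{
	struct sk_buff *skb;

	/* The skb head lives directly in the page_pool page. */
	skb = napi_build_skb(page_address(page), PAGE_SIZE);
	if (unlikely(!skb)) {
		/* allow_direct=true is safe here: we are in NAPI context */
		page_pool_put_full_page(ring->pool, page, true);
		return;
	}

	skb_reserve(skb, MY_SKB_HEADROOM);
	skb_put(skb, len);

	/* Was: page_pool_release_page(ring->pool, page);
	 * the skb now carries the recycling hint instead.
	 */
	skb_mark_for_recycle(skb);

	skb->protocol = eth_type_trans(skb, ring->netdev);
	napi_gro_receive(napi, skb);
}

On the free side, skb_mark_for_recycle() sets skb->pp_recycle, so the page
is offered back to its pool rather than handed to the page allocator.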
From 535b9c61bdef6017228c708128b7849a476f8da5 Mon Sep 17 00:00:00 2001
From: Jakub Kicinski
Date: Wed, 19 Jul 2023 18:04:08 -0700
Subject: [PATCH 3/4] net: page_pool: hide page_pool_release_page()

There seems to be no user calling page_pool_release_page() for legit
reasons; all the users simply had not been converted to skb-based
recycling yet. The previous changes converted them. Update the docs,
and unexport the function.

Link: https://lore.kernel.org/r/20230720010409.1967072-4-kuba@kernel.org
Reviewed-by: Alexander Lobakin
Signed-off-by: Jakub Kicinski
---
 Documentation/networking/page_pool.rst | 11 ++++-------
 include/net/page_pool.h                | 10 ++--------
 net/core/page_pool.c                   |  3 +--
 3 files changed, 7 insertions(+), 17 deletions(-)

diff --git a/Documentation/networking/page_pool.rst b/Documentation/networking/page_pool.rst
index 873efd97f822..0aa850cf4447 100644
--- a/Documentation/networking/page_pool.rst
+++ b/Documentation/networking/page_pool.rst
@@ -13,9 +13,9 @@ replacing dev_alloc_pages().
 
 API keeps track of in-flight pages, in order to let API user know
 when it is safe to free a page_pool object. Thus, API users
-must run page_pool_release_page() when a page is leaving the page_pool or
-call page_pool_put_page() where appropriate in order to maintain correct
-accounting.
+must call page_pool_put_page() to free the page, or attach
+the page to page_pool-aware objects like skbs marked with
+skb_mark_for_recycle().
 
 API user must call page_pool_put_page() once on a page, as it will
 either recycle the page, or in case of refcnt > 1, it will
@@ -87,9 +87,6 @@ a page will cause no race conditions is enough.
   must guarantee safe context (e.g NAPI), since it will recycle
   the page directly into the pool fast cache.
 
-* page_pool_release_page(): Unmap the page (if mapped) and account for it on
-  in-flight counters.
-
 * page_pool_dev_alloc_pages(): Get a page from the page allocator or
   page_pool caches.
 
@@ -194,7 +191,7 @@ NAPI poller
         if XDP_DROP:
             page_pool_recycle_direct(page_pool, page);
         } else (packet_is_skb) {
-            page_pool_release_page(page_pool, page);
+            skb_mark_for_recycle(skb);
             new_page = page_pool_dev_alloc_pages(page_pool);
         }
     }
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 126f9e294389..f1d5cc1fa13b 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -18,9 +18,8 @@
  *
  * API keeps track of in-flight pages, in-order to let API user know
  * when it is safe to dealloactor page_pool object. Thus, API users
- * must make sure to call page_pool_release_page() when a page is
- * "leaving" the page_pool. Or call page_pool_put_page() where
- * appropiate. For maintaining correct accounting.
+ * must call page_pool_put_page() where appropriate and only attach
+ * the page to page_pool-aware objects, like skbs marked for recycling.
  *
  * API user must only call page_pool_put_page() once on a page, as it
 * will either recycle the page, or in case of elevated refcnt, it
@@ -251,7 +250,6 @@ void page_pool_unlink_napi(struct page_pool *pool);
 void page_pool_destroy(struct page_pool *pool);
 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
 			   struct xdp_mem_info *mem);
-void page_pool_release_page(struct page_pool *pool, struct page *page);
 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 			     int count);
 #else
@@ -268,10 +266,6 @@ static inline void page_pool_use_xdp_mem(struct page_pool *pool,
 					  struct xdp_mem_info *mem)
 {
 }
-static inline void page_pool_release_page(struct page_pool *pool,
-					  struct page *page)
-{
-}
 
 static inline void page_pool_put_page_bulk(struct page_pool *pool,
 					   void **data, int count)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index a3e12a61d456..2c7cf5f2bcb8 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -492,7 +492,7 @@ static s32 page_pool_inflight(struct page_pool *pool)
  * a regular page (that will eventually be returned to the normal
  * page-allocator via put_page).
  */
-void page_pool_release_page(struct page_pool *pool, struct page *page)
+static void page_pool_release_page(struct page_pool *pool, struct page *page)
 {
 	dma_addr_t dma;
 	int count;
@@ -519,7 +519,6 @@ skip_dma_unmap:
 	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
 	trace_page_pool_state_release(pool, page, count);
 }
-EXPORT_SYMBOL(page_pool_release_page);
 
 /* Return a page to the page allocator, cleaning up our state */
 static void page_pool_return_page(struct page_pool *pool, struct page *page)
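With page_pool_release_page() hidden, the documentation above leaves an API
user exactly two ways to let go of a page. A short sketch of both follows;
rx_handle_page() and the drop flag are made up for illustration, while the
page_pool and skb calls are the real API:

#include <linux/skbuff.h>
#include <net/page_pool.h>

static void rx_handle_page(struct page_pool *pool, struct page *page,
			   struct sk_buff *skb, bool drop)
{
	if (drop) {
		/* Way 1: return the page explicitly; the _direct variant
		 * is only safe from the pool's NAPI context.
		 */
		page_pool_recycle_direct(pool, page);
		return;
	}

	/* Way 2: let the page travel with the skb; the recycling mark
	 * keeps the pool's in-flight accounting correct when the skb
	 * is freed.
	 */
	skb_mark_for_recycle(skb);
}

Either way, the rule from the header comment holds: one release operation
per page, never both.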
From 07e0c7d3179da5d06132f3d71b740aa91bde52aa Mon Sep 17 00:00:00 2001
From: Jakub Kicinski
Date: Wed, 19 Jul 2023 18:04:09 -0700
Subject: [PATCH 4/4] net: page_pool: merge page_pool_release_page() with
 page_pool_return_page()

Now that page_pool_release_page() is not exported we can merge it with
page_pool_return_page(). I believe that the "Do not replace this with
page_pool_return_page()" comment was there in case
page_pool_return_page() was not inlined, to avoid two function calls.

Acked-by: Jesper Dangaard Brouer
Reviewed-by: Yunsheng Lin
Link: https://lore.kernel.org/r/20230720010409.1967072-5-kuba@kernel.org
Reviewed-by: Alexander Lobakin
Signed-off-by: Jakub Kicinski
---
 net/core/page_pool.c | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 2c7cf5f2bcb8..7ca456bfab71 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -492,7 +492,7 @@ static s32 page_pool_inflight(struct page_pool *pool)
  * a regular page (that will eventually be returned to the normal
  * page-allocator via put_page).
  */
-static void page_pool_release_page(struct page_pool *pool, struct page *page)
+static void page_pool_return_page(struct page_pool *pool, struct page *page)
 {
 	dma_addr_t dma;
 	int count;
@@ -518,12 +518,6 @@ skip_dma_unmap:
 	 */
 	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
 	trace_page_pool_state_release(pool, page, count);
-}
-
-/* Return a page to the page allocator, cleaning up our state */
-static void page_pool_return_page(struct page_pool *pool, struct page *page)
-{
-	page_pool_release_page(pool, page);
 	put_page(page);
 	/* An optimization would be to call __free_pages(page, pool->p.order)
 	 * knowing page is not part of page-cache (thus avoiding a
@@ -615,9 +609,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
 	 * will be invoking put_page.
 	 */
 	recycle_stat_inc(pool, released_refcnt);
-	/* Do not replace this with page_pool_return_page() */
-	page_pool_release_page(pool, page);
-	put_page(page);
+	page_pool_return_page(pool, page);
 
 	return NULL;
 }
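After this merge, every page that leaves the pool for good funnels through
the single page_pool_return_page(): unmap (if the pool did the mapping),
bump the release counter used for in-flight accounting, then put_page().
Below is a hypothetical end-to-end sketch of the user-visible lifecycle —
my_pool_demo() and the sizes are made up; the page_pool API calls are real:

#include <linux/dma-direction.h>
#include <linux/err.h>
#include <linux/numa.h>
#include <net/page_pool.h>

static int my_pool_demo(struct device *dev)
{
	struct page_pool_params params = {
		.order		= 0,			/* single pages */
		.pool_size	= 256,			/* made-up ring size */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.flags		= PP_FLAG_DMA_MAP,	/* pool owns DMA mapping */
	};
	struct page_pool *pool;
	struct page *page;

	pool = page_pool_create(&params);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	page = page_pool_dev_alloc_pages(pool);
	if (page)
		/* The one put per page: recycles when possible, otherwise
		 * unmaps and frees via the merged page_pool_return_page().
		 */
		page_pool_put_full_page(pool, page, false);

	/* The pool itself is only freed once in-flight pages reach zero. */
	page_pool_destroy(pool);
	return 0;
}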