author	Alexander Lobakin <aleksander.lobakin@intel.com>	2024-03-29 17:55:07 +0100
committer	Jakub Kicinski <kuba@kernel.org>	2024-04-02 18:13:49 -0700
commit	39806b96c89ae5d52092c8f86393ecbfaae26697 (patch)
tree	78701c158045e79d05961247fd068dad9fe12d3c
parent	4a96a4e807c390a9d91b450ebe04eeb2e0ecc076 (diff)
download	ipsec-next-39806b96c89ae5d52092c8f86393ecbfaae26697.tar.gz
page_pool: try direct bulk recycling
Now that the checks for direct recycling possibility live inside the
Page Pool core, reuse them when performing bulk recycling.

page_pool_put_page_bulk() can be called from process context as well,
page_pool_napi_local() takes care of this at the very beginning.

Under high .ndo_xdp_xmit() traffic load, the win is 2-3% Pps assuming
the sending driver uses xdp_return_frame_bulk() on Tx completion.

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://lore.kernel.org/r/20240329165507.3240110-3-aleksander.lobakin@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
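For context, the bulk path this commit optimizes is typically fed from a
driver's Tx completion routine via xdp_return_frame_bulk(). Below is a
minimal sketch of such a consumer; the ring and buffer helpers
(my_tx_ring, my_tx_buf, my_next_completed()) are hypothetical stand-ins,
while xdp_frame_bulk_init(), xdp_return_frame_bulk() and
xdp_flush_frame_bulk() are the in-kernel batching API the commit message
refers to.

#include <net/xdp.h>

/* Hypothetical driver-private types, for illustration only. */
struct my_tx_ring;

struct my_tx_buf {
	struct xdp_frame *xdpf;
};

static struct my_tx_buf *my_next_completed(struct my_tx_ring *ring);

/* Called from the driver's NAPI poll: frames freed here end up in
 * page_pool_put_page_bulk(), where page_pool_napi_local() now decides
 * whether they may go straight into the pool's direct (lockless) cache.
 */
static void my_clean_xdp_tx_ring(struct my_tx_ring *ring, int budget)
{
	struct xdp_frame_bulk bq;
	struct my_tx_buf *buf;

	xdp_frame_bulk_init(&bq);

	while (budget-- && (buf = my_next_completed(ring))) {
		/* Batches frames per page_pool and flushes a full batch
		 * with a single page_pool_put_page_bulk() call.
		 */
		xdp_return_frame_bulk(buf->xdpf, &bq);
		buf->xdpf = NULL;
	}

	/* Return whatever is still batched. */
	xdp_flush_frame_bulk(&bq);
}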
-rw-r--r--	net/core/page_pool.c	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 9d56257e444beb..4c175091fc0ab9 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -772,8 +772,11 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 			     int count)
 {
 	int i, bulk_len = 0;
+	bool allow_direct;
 	bool in_softirq;
 
+	allow_direct = page_pool_napi_local(pool);
+
 	for (i = 0; i < count; i++) {
 		struct page *page = virt_to_head_page(data[i]);
 
@@ -781,13 +784,13 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 		if (!page_pool_is_last_ref(page))
 			continue;
 
-		page = __page_pool_put_page(pool, page, -1, false);
+		page = __page_pool_put_page(pool, page, -1, allow_direct);
 		/* Approved for bulk recycling in ptr_ring cache */
 		if (page)
 			data[bulk_len++] = page;
 	}
 
-	if (unlikely(!bulk_len))
+	if (!bulk_len)
 		return;
 
 	/* Bulk producer into ptr_ring page_pool cache */
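The hunk above leans on page_pool_napi_local(), introduced by the parent
commit (4a96a4e807c3): direct recycling is only approved when the caller
runs in softirq context on the CPU that owns the pool's NAPI instance.
The following is a simplified sketch of that check, not the verbatim
upstream function; the pool->cpuid and napi->list_owner fields are
assumptions carried over from that series.

/* Simplified sketch; treat the details as an approximation of the
 * upstream helper added in the parent commit.
 */
static bool page_pool_napi_local(const struct page_pool *pool)
{
	const struct napi_struct *napi;
	u32 cpuid;

	/* Process (or hardirq) context can race with the pool's
	 * consumer: never recycle directly from there.
	 */
	if (unlikely(!in_softirq()))
		return false;

	cpuid = smp_processor_id();

	/* Direct recycling is safe only on the CPU the pool's consumer
	 * (its NAPI instance) is currently bound to.
	 */
	if (READ_ONCE(pool->cpuid) == cpuid)
		return true;

	napi = READ_ONCE(pool->p.napi);

	return napi && READ_ONCE(napi->list_owner) == cpuid;
}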