author    Eric Dumazet <edumazet@google.com>  2023-04-21 09:43:56 +0000
committer David S. Miller <davem@davemloft.net>  2023-04-23 13:35:07 +0100
commit    a1aaee7f8f79d1b0595e24f8c3caed24630d6cb6 (patch)
tree      b13d4579a77711e6bbb662d134bb37d9a35a2103 /net/core/dev.c
parent    e6f50edfef046cf8ad541b4d5972bf38fdcdec39 (diff)
net: make napi_threaded_poll() aware of sd->defer_list
If we call skb_defer_free_flush() from napi_threaded_poll(), we can avoid raising an IPI from skb_attempt_defer_free() when the list becomes too big.

This allows napi_threaded_poll() to rely less on softirqs, and lowers the latency caused by an overly long defer list.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
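For context, skb_defer_free_flush() already exists in net/core/dev.c (it appears in the hunk header below); this patch only adds a second caller. A simplified sketch of what it does, based on the kernel around this release and not necessarily the exact source at this commit: detach the per-CPU sd->defer_list under sd->defer_lock, then free every deferred skb.

        static void skb_defer_free_flush(struct softnet_data *sd)
        {
                struct sk_buff *skb, *next;
                unsigned long flags;

                /* Paired with the WRITE_ONCE() in skb_attempt_defer_free() */
                if (!READ_ONCE(sd->defer_list))
                        return;

                /* Detach the whole list under the lock, free it outside. */
                spin_lock_irqsave(&sd->defer_lock, flags);
                skb = sd->defer_list;
                sd->defer_list = NULL;
                sd->defer_count = 0;
                spin_unlock_irqrestore(&sd->defer_lock, flags);

                while (skb != NULL) {
                        next = skb->next;
                        napi_consume_skb(skb, 1);
                        skb = next;
                }
        }

With the change below, the NAPI kthread drains this list on every poll iteration, so a busy threaded NAPI poller no longer depends on the softirq path (or a remote IPI) to get its deferred skbs freed.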
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c | 3 +++
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 81ded215731bdb..7d9ec23f97c6ec 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6622,6 +6622,7 @@ static void skb_defer_free_flush(struct softnet_data *sd)
static int napi_threaded_poll(void *data)
{
struct napi_struct *napi = data;
+ struct softnet_data *sd;
void *have;
while (!napi_thread_wait(napi)) {
@@ -6629,11 +6630,13 @@ static int napi_threaded_poll(void *data)
bool repoll = false;
local_bh_disable();
+ sd = this_cpu_ptr(&softnet_data);
have = netpoll_poll_lock(napi);
__napi_poll(napi, &repoll);
netpoll_poll_unlock(have);
+ skb_defer_free_flush(sd);
local_bh_enable();
if (!repoll)
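For reference, the IPI this patch helps avoid comes from the producer side, skb_attempt_defer_free() in net/core/skbuff.c: an skb freed on a CPU other than the one that allocated it is queued on the allocating CPU's sd->defer_list, and that CPU is kicked with an IPI once the list grows past a threshold so it runs the softirq and flushes the list. A simplified sketch under that assumption (not the exact source at this commit):

        void skb_attempt_defer_free(struct sk_buff *skb)
        {
                int cpu = skb->alloc_cpu;
                struct softnet_data *sd;
                unsigned int defer_max;
                bool kick;

                /* Free locally if deferral to the allocating CPU makes no sense. */
                if (WARN_ON_ONCE(cpu >= nr_cpu_ids) || !cpu_online(cpu) ||
                    cpu == raw_smp_processor_id()) {
                        __kfree_skb(skb);
                        return;
                }

                sd = &per_cpu(softnet_data, cpu);
                defer_max = READ_ONCE(sysctl_skb_defer_max);
                if (READ_ONCE(sd->defer_count) >= defer_max) {
                        __kfree_skb(skb);
                        return;
                }

                spin_lock_bh(&sd->defer_lock);
                /* Kick the remote CPU once the queue reaches half capacity. */
                kick = sd->defer_count == (defer_max >> 1);
                WRITE_ONCE(sd->defer_count, sd->defer_count + 1);

                skb->next = sd->defer_list;
                /* Paired with the READ_ONCE() in skb_defer_free_flush() */
                WRITE_ONCE(sd->defer_list, skb);
                spin_unlock_bh(&sd->defer_lock);

                /* IPI the allocating CPU so it flushes its defer_list. */
                if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
                        smp_call_function_single_async(cpu, &sd->defer_csd);
        }

By flushing from napi_threaded_poll() as well, the list on a threaded-NAPI CPU rarely reaches the kick threshold, so the smp_call_function_single_async() IPI is rarely needed.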