author     Eric Dumazet <edumazet@google.com>      2023-09-20 20:17:15 +0000
committer  David S. Miller <davem@davemloft.net>   2023-10-01 13:20:36 +0100
commit     8f6c4ff9e0522da9313fbff5295ae208af679fed (patch)
tree       b6a000dec2cee2f62ef3a94484e1c80e77171356 /net/sched
parent     076433bd78d719b34d465c1e69eef512036b534c (diff)
download   linux-8f6c4ff9e0522da9313fbff5295ae208af679fed.tar.gz
net_sched: sch_fq: always garbage collect
FQ performs garbage collection at enqueue time, and only when the number of flows is above a given threshold, which is reached only after the qdisc has been in use for a while.

Since an RB-tree traversal is needed to locate a flow anyway, it makes sense to perform gc all the time, keeping the rb-trees smaller.

This reduces average storage costs in FQ by 50% and avoids one cache line miss at enqueue time when the fast path added in the prior patch cannot be used.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_fq.c  4
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 5cf3b50a24d58..681bbf34b7076 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -367,9 +367,7 @@ static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb)
 	root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];
-	if (q->flows >= (2U << q->fq_trees_log) &&
-	    q->inactive_flows > q->flows/2)
-		fq_gc(q, root, sk);
+	fq_gc(q, root, sk);
 	p = &root->rb_node;
 	parent = NULL;
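The hunk above drops the flow-count threshold and calls fq_gc() unconditionally before the rb-tree walk. As a rough standalone model of why garbage collecting during the lookup walk is essentially free, the sketch below reaps stale entries from a toy binary search tree while descending toward a key. Every name in it (struct flow, lookup_and_gc, insert) is invented for the illustration, and only the dead-leaf case is handled, unlike the real fq_gc() which batches and unlinks arbitrary rb-tree nodes.

/* gc_walk_demo.c - toy userspace model, not kernel code.
 *
 * Models the idea from the commit message: the enqueue-time lookup already
 * walks a search tree keyed by socket pointer, so reclaiming stale flows
 * during that same walk keeps the tree small at no extra traversal cost.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct flow {
	unsigned long key;	/* stands in for the socket pointer   */
	bool dead;		/* stands in for "flow can be reaped" */
	struct flow *left, *right;
};

/* Walk toward @key, freeing any dead *leaf* met on the way (simplified:
 * the real fq_gc() can erase arbitrary rb-tree nodes).
 * Returns the matching flow, or NULL if the caller must create one. */
static struct flow *lookup_and_gc(struct flow **rootp, unsigned long key)
{
	struct flow **p = rootp;

	while (*p) {
		struct flow *f = *p;

		if (f->dead && !f->left && !f->right) {
			*p = NULL;	/* unlink the stale leaf ... */
			free(f);	/* ... and reclaim it now    */
			continue;	/* *p is NULL, loop ends     */
		}
		if (f->key == key)
			return f;
		p = (key < f->key) ? &f->left : &f->right;
	}
	return NULL;
}

static struct flow *insert(struct flow **rootp, unsigned long key)
{
	struct flow **p = rootp;

	while (*p)
		p = (key < (*p)->key) ? &(*p)->left : &(*p)->right;
	*p = calloc(1, sizeof(**p));
	(*p)->key = key;
	return *p;
}

int main(void)
{
	struct flow *root = NULL;

	insert(&root, 20);
	insert(&root, 10)->dead = true;	/* a stale flow, sitting as a leaf */
	insert(&root, 30);

	/* The walk toward key 5 passes the stale leaf 10 and reaps it. */
	printf("lookup 5 : %s\n", lookup_and_gc(&root, 5) ? "hit" : "miss");
	printf("lookup 10: %s (reaped by the previous walk)\n",
	       lookup_and_gc(&root, 10) ? "hit" : "miss");
	printf("lookup 30: %s\n", lookup_and_gc(&root, 30) ? "hit" : "miss");
	return 0;
}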