author		Jason Xing <kernelxing@tencent.com>	2024-04-18 15:36:02 +0800
committer	David S. Miller <davem@davemloft.net>	2024-04-19 11:38:03 +0100
commit		f00bf5dc83202fd9f75bde80e46c3a747c34cc6a (patch)
tree		d50897be1abcfc81b44c08559effc1bfac29faa5
parent		84b6823cd96b38c40b3b30beabbfa48d92990e1a (diff)
download	net-queue-f00bf5dc83202fd9f75bde80e46c3a747c34cc6a.tar.gz
net: rps: protect filter locklessly
rflow->filter can be written and read concurrently without any lock, so
annotate those accesses with WRITE_ONCE()/READ_ONCE() to make the lockless
accesses well defined.

Signed-off-by: Jason Xing <kernelxing@tencent.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
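For illustration only, here is a minimal userspace sketch of the
READ_ONCE()/WRITE_ONCE() pattern this patch applies to rflow->filter. The
simplified macro definitions, the trimmed-down struct and the flow values
below are stand-ins, not the kernel's actual implementation; the real macros
live in the kernel and do additional type and size checking.

	/*
	 * Sketch of the pattern: one path publishes a new filter id while a
	 * concurrent path compares the current filter id, with no lock held.
	 * The volatile casts force single, untorn accesses that the compiler
	 * may not fold, duplicate or re-read.
	 */
	#include <stdio.h>

	#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

	#define RPS_NO_FILTER 0xffff	/* "no filter" marker, as in the kernel */

	struct rps_dev_flow {
		unsigned short cpu;
		unsigned short filter;	/* written by the steering path, read when expiring flows */
	};

	int main(void)
	{
		struct rps_dev_flow old_rflow = { .filter = 3 };
		struct rps_dev_flow rflow = { .filter = RPS_NO_FILTER };
		int rc = 3;	/* illustrative filter id; the real code gets it from the driver */

		/* Writer side: publish the new filter, clear the stale one. */
		WRITE_ONCE(rflow.filter, rc);
		if (old_rflow.filter == rc)
			WRITE_ONCE(old_rflow.filter, RPS_NO_FILTER);

		/* Reader side: a concurrent reader compares against a filter id. */
		if (READ_ONCE(rflow.filter) == rc)
			printf("flow still owns filter %d\n", rc);

		return 0;
	}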
-rw-r--r--	net/core/dev.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 6f027f676243d1..182fce2537d4af 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4530,9 +4530,9 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
goto out;
old_rflow = rflow;
rflow = &flow_table->flows[flow_id];
- rflow->filter = rc;
- if (old_rflow->filter == rflow->filter)
- old_rflow->filter = RPS_NO_FILTER;
+ WRITE_ONCE(rflow->filter, rc);
+ if (old_rflow->filter == rc)
+ WRITE_ONCE(old_rflow->filter, RPS_NO_FILTER);
out:
#endif
head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head);
@@ -4672,7 +4672,7 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
if (flow_table && flow_id <= flow_table->mask) {
rflow = &flow_table->flows[flow_id];
cpu = READ_ONCE(rflow->cpu);
- if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
+ if (READ_ONCE(rflow->filter) == filter_id && cpu < nr_cpu_ids &&
((int)(READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head) -
READ_ONCE(rflow->last_qtail)) <
(int)(10 * flow_table->mask)))