author    Eric Dumazet <edumazet@google.com>       2024-04-18 07:32:48 +0000
committer David S. Miller <davem@davemloft.net>    2024-04-19 11:34:08 +0100
commit    c85cedb38f41bebc3da08c4a7bc58c5f62a2a950 (patch)
tree      cd123321512e0d1b1f8271e902bd65ce923613e5
parent    6c00dc4fdb40fa434cb1271fac3a1201449cb4d3 (diff)
net_sched: sch_skbprio: implement lockless skbprio_dump()
Instead of relying on RTNL, skbprio_dump() can use a READ_ONCE()
annotation, paired with a WRITE_ONCE() in skbprio_change().

Also add a READ_ONCE(sch->limit) in skbprio_enqueue().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
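The reason both sides are annotated is that once skbprio_dump() no longer
holds RTNL, sch->limit can be written by skbprio_change() while the dump or
enqueue path reads it; plain loads and stores may be torn, re-read, or
reordered by the compiler. Below is a minimal userspace sketch of that
READ_ONCE()/WRITE_ONCE() pairing, not the kernel code: the macro definitions
are simplified stand-ins for the kernel's, and the names (limit,
change_thread, dump_thread) are only illustrative. Build with gcc -pthread.

/*
 * Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE():
 * force a single volatile access so the compiler cannot tear,
 * re-read, or cache the value.
 */
#include <pthread.h>
#include <stdio.h>

#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

static unsigned int limit = 64;	/* plays the role of sch->limit */

/* Analogue of skbprio_change(): update the limit without a lock. */
static void *change_thread(void *arg)
{
	WRITE_ONCE(limit, 128);
	return NULL;
}

/* Analogue of skbprio_dump(): read the limit exactly once. */
static void *dump_thread(void *arg)
{
	printf("limit=%u\n", READ_ONCE(limit));
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, change_thread, NULL);
	pthread_create(&r, NULL, dump_thread, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}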
 net/sched/sch_skbprio.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
index b4dd626c309c36..20ff7386b74bd8 100644
--- a/net/sched/sch_skbprio.c
+++ b/net/sched/sch_skbprio.c
@@ -79,7 +79,9 @@ static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	prio = min(skb->priority, max_priority);
 
 	qdisc = &q->qdiscs[prio];
-	if (sch->q.qlen < sch->limit) {
+
+	/* sch->limit can change under us from skbprio_change() */
+	if (sch->q.qlen < READ_ONCE(sch->limit)) {
 		__skb_queue_tail(qdisc, skb);
 		qdisc_qstats_backlog_inc(sch, skb);
 		q->qstats[prio].backlog += qdisc_pkt_len(skb);
@@ -172,7 +174,7 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
 	if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
 		return -EINVAL;
 
-	sch->limit = ctl->limit;
+	WRITE_ONCE(sch->limit, ctl->limit);
 	return 0;
 }
@@ -200,7 +202,7 @@ static int skbprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct tc_skbprio_qopt opt;
 
-	opt.limit = sch->limit;
+	opt.limit = READ_ONCE(sch->limit);
 
 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
 		return -1;