about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorDavid S. Miller <davem@nuts.davemloft.net>2004-07-05 06:39:01 -0700
committerDavid S. Miller <davem@nuts.davemloft.net>2004-07-05 06:39:01 -0700
commit00e11c5f00dbfe2a65f796da881f9bb5b4ce5cbf (patch)
treedd43b2a475cee98fab9babda6e1c460bf15c5acd /net
parent18c9628e51ae509054a69a0feb5b834be0f9324d (diff)
downloadhistory-00e11c5f00dbfe2a65f796da881f9bb5b4ce5cbf.tar.gz
[PKT_SCHED]: Do not embed spinlock in tc_stats structure.
Embedding the spinlock causes the structure to be sized and copied to/from userspace incorrectly. The real crux of the problem comes from the rtnetlink attribute copying line, which read: RTA_PUT(skb, TCA_STATS, (char*)&st->lock - (char*)st, st); that computed length is not necessarily sizeof(struct tc_stats) due to alignment issues.
Diffstat (limited to 'net')
-rw-r--r--net/sched/estimator.c8
-rw-r--r--net/sched/police.c10
-rw-r--r--net/sched/sch_api.c20
-rw-r--r--net/sched/sch_atm.c1
-rw-r--r--net/sched/sch_cbq.c13
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sched/sch_hfsc.c17
-rw-r--r--net/sched/sch_htb.c1
8 files changed, 42 insertions, 30 deletions
diff --git a/net/sched/estimator.c b/net/sched/estimator.c
index d0f0df64c350cd..393496b2f202da 100644
--- a/net/sched/estimator.c
+++ b/net/sched/estimator.c
@@ -81,6 +81,7 @@ struct qdisc_estimator
{
struct qdisc_estimator *next;
struct tc_stats *stats;
+ spinlock_t *stats_lock;
unsigned interval;
int ewma_log;
u64 last_bytes;
@@ -112,7 +113,7 @@ static void est_timer(unsigned long arg)
u32 npackets;
u32 rate;
- spin_lock(st->lock);
+ spin_lock(e->stats_lock);
nbytes = st->bytes;
npackets = st->packets;
rate = (nbytes - e->last_bytes)<<(7 - idx);
@@ -124,14 +125,14 @@ static void est_timer(unsigned long arg)
e->last_packets = npackets;
e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
e->stats->pps = (e->avpps+0x1FF)>>10;
- spin_unlock(st->lock);
+ spin_unlock(e->stats_lock);
}
mod_timer(&elist[idx].timer, jiffies + ((HZ/4)<<idx));
read_unlock(&est_lock);
}
-int qdisc_new_estimator(struct tc_stats *stats, struct rtattr *opt)
+int qdisc_new_estimator(struct tc_stats *stats, spinlock_t *stats_lock, struct rtattr *opt)
{
struct qdisc_estimator *est;
struct tc_estimator *parm = RTA_DATA(opt);
@@ -149,6 +150,7 @@ int qdisc_new_estimator(struct tc_stats *stats, struct rtattr *opt)
memset(est, 0, sizeof(*est));
est->interval = parm->interval + 2;
est->stats = stats;
+ est->stats_lock = stats_lock;
est->ewma_log = parm->ewma_log;
est->last_bytes = stats->bytes;
est->avbps = stats->bps<<5;
diff --git a/net/sched/police.c b/net/sched/police.c
index 807362a4c375c2..12bd4400653766 100644
--- a/net/sched/police.c
+++ b/net/sched/police.c
@@ -207,7 +207,7 @@ int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,struct tc_actio
ret = 1;
p->refcnt = 1;
spin_lock_init(&p->lock);
- p->stats.lock = &p->lock;
+ p->stats_lock = &p->lock;
if (bind)
p->bindcnt = 1;
override:
@@ -245,7 +245,7 @@ override:
p->index = parm->index ? : tcf_police_new_index();
#ifdef CONFIG_NET_ESTIMATOR
if (est)
- qdisc_new_estimator(&p->stats, est);
+ qdisc_new_estimator(&p->stats, p->stats_lock, est);
#endif
h = tcf_police_hash(p->index);
write_lock_bh(&police_lock);
@@ -280,7 +280,7 @@ int tcf_act_police_stats(struct sk_buff *skb, struct tc_action *a)
struct tcf_police *p;
p = PRIV(a);
if (NULL != p)
- return qdisc_copy_stats(skb, &p->stats);
+ return qdisc_copy_stats(skb, &p->stats, p->stats_lock);
return 1;
}
@@ -452,7 +452,7 @@ struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
memset(p, 0, sizeof(*p));
p->refcnt = 1;
spin_lock_init(&p->lock);
- p->stats.lock = &p->lock;
+ p->stats_lock = &p->lock;
if (parm->rate.rate) {
if ((p->R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1])) == NULL)
goto failure;
@@ -480,7 +480,7 @@ struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
p->action = parm->action;
#ifdef CONFIG_NET_ESTIMATOR
if (est)
- qdisc_new_estimator(&p->stats, est);
+ qdisc_new_estimator(&p->stats, p->stats_lock, est);
#endif
h = tcf_police_hash(p->index);
write_lock_bh(&police_lock);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 94169a2b90a2cd..575ca5016a4a4e 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -433,7 +433,7 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
sch->dequeue = ops->dequeue;
sch->dev = dev;
atomic_set(&sch->refcnt, 1);
- sch->stats.lock = &dev->queue_lock;
+ sch->stats_lock = &dev->queue_lock;
if (handle == 0) {
handle = qdisc_alloc_handle(dev);
err = -ENOMEM;
@@ -460,7 +460,8 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
write_unlock(&qdisc_tree_lock);
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
- qdisc_new_estimator(&sch->stats, tca[TCA_RATE-1]);
+ qdisc_new_estimator(&sch->stats, sch->stats_lock,
+ tca[TCA_RATE-1]);
#endif
return sch;
}
@@ -487,7 +488,8 @@ static int qdisc_change(struct Qdisc *sch, struct rtattr **tca)
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1]) {
qdisc_kill_estimator(&sch->stats);
- qdisc_new_estimator(&sch->stats, tca[TCA_RATE-1]);
+ qdisc_new_estimator(&sch->stats, sch->stats_lock,
+ tca[TCA_RATE-1]);
}
#endif
return 0;
@@ -726,15 +728,15 @@ graft:
return 0;
}
-int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *st)
+int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *st, spinlock_t *lock)
{
- spin_lock_bh(st->lock);
- RTA_PUT(skb, TCA_STATS, (char*)&st->lock - (char*)st, st);
- spin_unlock_bh(st->lock);
+ spin_lock_bh(lock);
+ RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), st);
+ spin_unlock_bh(lock);
return 0;
rtattr_failure:
- spin_unlock_bh(st->lock);
+ spin_unlock_bh(lock);
return -1;
}
@@ -758,7 +760,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
if (q->ops->dump && q->ops->dump(q, skb) < 0)
goto rtattr_failure;
q->stats.qlen = q->q.qlen;
- if (qdisc_copy_stats(skb, &q->stats))
+ if (qdisc_copy_stats(skb, &q->stats, q->stats_lock))
goto rtattr_failure;
nlh->nlmsg_len = skb->tail - b;
return skb->len;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index d81d11981269ef..c43a6e014656a4 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -70,6 +70,7 @@ struct atm_flow_data {
u32 classid; /* x:y type ID */
int ref; /* reference count */
struct tc_stats stats;
+ spinlock_t *stats_lock;
struct atm_flow_data *next;
struct atm_flow_data *excess; /* flow for excess traffic;
NULL to set CLP instead */
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 66b6ba1aa33910..2d484883465f79 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -147,6 +147,7 @@ struct cbq_class
long deficit; /* Saved deficit for WRR */
unsigned long penalized;
struct tc_stats stats;
+ spinlock_t *stats_lock;
struct tc_cbq_xstats xstats;
struct tcf_proto *filter_list;
@@ -1468,7 +1469,7 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
q->link.ewma_log = TC_CBQ_DEF_EWMA;
q->link.avpkt = q->link.allot/2;
q->link.minidle = -0x7FFFFFFF;
- q->link.stats.lock = &sch->dev->queue_lock;
+ q->link.stats_lock = &sch->dev->queue_lock;
init_timer(&q->wd_timer);
q->wd_timer.data = (unsigned long)sch;
@@ -1667,7 +1668,7 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg,
goto rtattr_failure;
rta->rta_len = skb->tail - b;
cl->stats.qlen = cl->q->q.qlen;
- if (qdisc_copy_stats(skb, &cl->stats))
+ if (qdisc_copy_stats(skb, &cl->stats, cl->stats_lock))
goto rtattr_failure;
spin_lock_bh(&sch->dev->queue_lock);
cl->xstats.avgidle = cl->avgidle;
@@ -1897,7 +1898,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1]) {
qdisc_kill_estimator(&cl->stats);
- qdisc_new_estimator(&cl->stats, tca[TCA_RATE-1]);
+ qdisc_new_estimator(&cl->stats, cl->stats_lock,
+ tca[TCA_RATE-1]);
}
#endif
return 0;
@@ -1958,7 +1960,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
cl->allot = parent->allot;
cl->quantum = cl->allot;
cl->weight = cl->R_tab->rate.rate;
- cl->stats.lock = &sch->dev->queue_lock;
+ cl->stats_lock = &sch->dev->queue_lock;
sch_tree_lock(sch);
cbq_link_class(cl);
@@ -1988,7 +1990,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
- qdisc_new_estimator(&cl->stats, tca[TCA_RATE-1]);
+ qdisc_new_estimator(&cl->stats, cl->stats_lock,
+ tca[TCA_RATE-1]);
#endif
*arg = (unsigned long)cl;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index db8cab2cf1d5ad..672650c572aaf0 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -386,7 +386,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev = dev;
- sch->stats.lock = &dev->queue_lock;
+ sch->stats_lock = &dev->queue_lock;
atomic_set(&sch->refcnt, 1);
/* enqueue is accessed locklessly - make sure it's visible
* before we set a netdevice's qdisc pointer to sch */
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index cf43ce6fefe0e1..ce91e23134e805 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -122,6 +122,7 @@ struct hfsc_class
unsigned int refcnt; /* usage count */
struct tc_stats stats; /* generic statistics */
+ spinlock_t *stats_lock;
unsigned int level; /* class level in hierarchy */
struct tcf_proto *filter_list; /* filter list */
unsigned int filter_cnt; /* filter count */
@@ -1124,7 +1125,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1]) {
qdisc_kill_estimator(&cl->stats);
- qdisc_new_estimator(&cl->stats, tca[TCA_RATE-1]);
+ qdisc_new_estimator(&cl->stats, cl->stats_lock,
+ tca[TCA_RATE-1]);
}
#endif
return 0;
@@ -1167,7 +1169,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
if (cl->qdisc == NULL)
cl->qdisc = &noop_qdisc;
- cl->stats.lock = &sch->dev->queue_lock;
+ cl->stats_lock = &sch->dev->queue_lock;
INIT_LIST_HEAD(&cl->children);
INIT_LIST_HEAD(&cl->actlist);
@@ -1181,7 +1183,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
- qdisc_new_estimator(&cl->stats, tca[TCA_RATE-1]);
+ qdisc_new_estimator(&cl->stats, cl->stats_lock,
+ tca[TCA_RATE-1]);
#endif
*arg = (unsigned long)cl;
return 0;
@@ -1428,7 +1431,7 @@ static inline int
hfsc_dump_stats(struct sk_buff *skb, struct hfsc_class *cl)
{
cl->stats.qlen = cl->qdisc->q.qlen;
- if (qdisc_copy_stats(skb, &cl->stats) < 0)
+ if (qdisc_copy_stats(skb, &cl->stats, cl->stats_lock) < 0)
goto rtattr_failure;
return skb->len;
@@ -1551,7 +1554,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
qopt = RTA_DATA(opt);
memset(q, 0, sizeof(struct hfsc_sched));
- sch->stats.lock = &sch->dev->queue_lock;
+ sch->stats_lock = &sch->dev->queue_lock;
q->defcls = qopt->defcls;
for (i = 0; i < HFSC_HSIZE; i++)
@@ -1566,7 +1569,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
if (q->root.qdisc == NULL)
q->root.qdisc = &noop_qdisc;
- q->root.stats.lock = &sch->dev->queue_lock;
+ q->root.stats_lock = &sch->dev->queue_lock;
INIT_LIST_HEAD(&q->root.children);
INIT_LIST_HEAD(&q->root.actlist);
@@ -1671,7 +1674,7 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
sch->stats.qlen = sch->q.qlen;
- if (qdisc_copy_stats(skb, &sch->stats) < 0)
+ if (qdisc_copy_stats(skb, &sch->stats, sch->stats_lock) < 0)
goto rtattr_failure;
return skb->len;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 8b7992ed4e141c..65797b3306cccb 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -143,6 +143,7 @@ struct htb_class
/* general class parameters */
u32 classid;
struct tc_stats stats; /* generic stats */
+ spinlock_t *stats_lock;
struct tc_htb_xstats xstats;/* our special stats */
int refcnt; /* usage count of this class */