diff options
author | Stephen Hemminger <shemminger@osdl.org> | 2005-03-31 04:32:53 -0800 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2005-03-31 04:32:53 -0800 |
commit | e6762bc7b42787313b20f23342a83e76ba570761 (patch) | |
tree | 4f5f2fe954e5b87b447c82f199f796d313d10e6c | |
parent | 4492174a34f3118342587bedef10117817179375 (diff) | |
download | history-e6762bc7b42787313b20f23342a83e76ba570761.tar.gz |
[PKT_SCHED]: netem: Account for packets in delayed queue in qlen
Netem has a private queue for delayed packets, and currently, packets
in this queue are not accounted for in the qdisc qlen statistics.
This is a problem if netem is used inside another qdisc doing rate
control that peeks at the qlen.
This patch changes the statistics to include the packets held but
not ready to send.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | net/sched/sch_netem.c | 49 |
1 file changed, 25 insertions, 24 deletions
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index c8259cbe652c3f..31c29deb139d3e 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -152,9 +152,6 @@ static int delay_skb(struct Qdisc *sch, struct sk_buff *skb) /* Always queue at tail to keep packets in order */ if (likely(q->delayed.qlen < q->limit)) { __skb_queue_tail(&q->delayed, skb); - sch->bstats.bytes += skb->len; - sch->bstats.packets++; - if (!timer_pending(&q->timer)) { q->timer.expires = jiffies + PSCHED_US2JIFFIE(td); add_timer(&q->timer); @@ -162,7 +159,6 @@ static int delay_skb(struct Qdisc *sch, struct sk_buff *skb) return NET_XMIT_SUCCESS; } - sch->qstats.drops++; kfree_skb(skb); return NET_XMIT_DROP; } @@ -170,6 +166,8 @@ static int delay_skb(struct Qdisc *sch, struct sk_buff *skb) static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); + struct sk_buff *skb2; + int ret; pr_debug("netem_enqueue skb=%p @%lu\n", skb, jiffies); @@ -182,12 +180,16 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) } /* Random duplication */ - if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) { - struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); - + if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor) + && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { pr_debug("netem_enqueue: dup %p\n", skb2); - if (skb2) - delay_skb(sch, skb2); + + if (delay_skb(sch, skb2)) { + sch->q.qlen++; + sch->bstats.bytes += skb2->len; + sch->bstats.packets++; + } else + sch->qstats.drops++; } /* If doing simple delay then gap == 0 so all packets @@ -196,22 +198,21 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) * packets will be delayed. 
*/ if (q->counter < q->gap) { - int ret; - ++q->counter; ret = q->qdisc->enqueue(skb, q->qdisc); - if (likely(ret == NET_XMIT_SUCCESS)) { - sch->q.qlen++; - sch->bstats.bytes += skb->len; - sch->bstats.packets++; - } else - sch->qstats.drops++; - return ret; + } else { + q->counter = 0; + ret = delay_skb(sch, skb); } - - q->counter = 0; - return delay_skb(sch, skb); + if (likely(ret == NET_XMIT_SUCCESS)) { + sch->q.qlen++; + sch->bstats.bytes += skb->len; + sch->bstats.packets++; + } else + sch->qstats.drops++; + + return ret; } /* Requeue packets but don't change time stamp */ @@ -283,10 +284,10 @@ static void netem_watchdog(unsigned long arg) } __skb_unlink(skb, &q->delayed); - if (q->qdisc->enqueue(skb, q->qdisc)) + if (q->qdisc->enqueue(skb, q->qdisc)) { + sch->q.qlen--; sch->qstats.drops++; - else - sch->q.qlen++; + } } qdisc_run(dev); spin_unlock_bh(&dev->queue_lock); |