From fc39965f0aa94c65fa23f8ba569c2830a3c8190f Mon Sep 17 00:00:00 2001
From: mbeauch
Date: Fri, 3 Jul 2009 08:30:10 -0500
Subject: [PATCH] net: detect recursive calls to dev_queue_xmit() on RT

commit ea481113da8a28b622288918e90ef1977ef55dbc in tip.

Changed the real-time patch code to detect recursive calls to
dev_queue_xmit and drop the packet when detected.

Signed-off-by: Mark Beauchemin
[ ported to latest upstream ]
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
Signed-off-by: Paul Gortmaker
---
 drivers/net/bnx2.c        |    2 +-
 drivers/net/mv643xx_eth.c |    6 +++---
 drivers/net/niu.c         |    2 +-
 include/linux/netdevice.h |   32 ++++++++++++++++----------------
 net/core/dev.c            |   10 +++-------
 net/core/netpoll.c        |    2 +-
 net/sched/sch_generic.c   |    4 ++--
 7 files changed, 27 insertions(+), 31 deletions(-)
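
Notes: on PREEMPT_RT the holder of a queue's _xmit_lock can be preempted
and may even migrate to another CPU, so a CPU number no longer identifies
the lock owner, and the old RT workaround of unconditionally taking the
lock ("if (1)") gave up recursion detection entirely. Recording the
owning task instead restores the check: dev_queue_xmit() compares the
recorded owner against "current" and drops the packet on a match.
Condensed from the net/core/dev.c hunk below, the patched fast path
behaves roughly like this (a simplified sketch, not the verbatim kernel
code; the drop branch assumes dev_queue_xmit()'s usual
kfree_skb()/-ENETDOWN error handling):

	if (txq->xmit_lock_owner != (void *)current) {
		/* Unowned, or owned by another task: safe to take the lock. */
		HARD_TX_LOCK(dev, txq, (void *)current);
		if (!netif_tx_queue_stopped(txq))
			rc = dev_hard_start_xmit(skb, dev, txq);
		HARD_TX_UNLOCK(dev, txq);
	} else {
		/*
		 * Recursion: this task already holds this queue's lock,
		 * so taking it again would deadlock.  Drop the packet.
		 */
		kfree_skb(skb);
		rc = -ENETDOWN;
	}

Note also that the "unowned" sentinel changes from -1 to (void *)-1,
a value that can never equal a valid task_struct pointer.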
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ac90a38..b398c4d 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2858,7 +2858,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 	if (unlikely(netif_tx_queue_stopped(txq)) &&
 	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
-		__netif_tx_lock(txq, smp_processor_id());
+		__netif_tx_lock(txq, (void *)current);
 		if ((netif_tx_queue_stopped(txq)) &&
 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
 			netif_tx_wake_queue(txq);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 8613a52..89a155f 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -508,7 +508,7 @@ static void txq_maybe_wake(struct tx_queue *txq)
 	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 
 	if (netif_tx_queue_stopped(nq)) {
-		__netif_tx_lock(nq, smp_processor_id());
+		__netif_tx_lock(nq, (void *)current);
 		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
 			netif_tx_wake_queue(nq);
 		__netif_tx_unlock(nq);
@@ -901,7 +901,7 @@ static void txq_kick(struct tx_queue *txq)
 	u32 hw_desc_ptr;
 	u32 expected_ptr;
 
-	__netif_tx_lock(nq, smp_processor_id());
+	__netif_tx_lock(nq, (void *)current);
 
 	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
 		goto out;
@@ -925,7 +925,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 	int reclaimed;
 
-	__netif_tx_lock(nq, smp_processor_id());
+	__netif_tx_lock(nq, (void *)current);
 
 	reclaimed = 0;
 	while (reclaimed < budget && txq->tx_desc_count > 0) {
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index d5cd16b..43f4469 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3647,7 +3647,7 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 out:
 	if (unlikely(netif_tx_queue_stopped(txq) &&
 		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
-		__netif_tx_lock(txq, smp_processor_id());
+		__netif_tx_lock(txq, (void *)current);
 		if (netif_tx_queue_stopped(txq) &&
 		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
 			netif_tx_wake_queue(txq);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6be4dde..bd1ac59 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -520,7 +520,7 @@ struct netdev_queue {
 	 * write mostly part
 	 */
 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
-	int			xmit_lock_owner;
+	void			*xmit_lock_owner;
 	/*
 	 * please use this field instead of dev->trans_start
 	 */
@@ -1781,41 +1781,41 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 	return (1 << debug_value) - 1;
 }
 
-static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
+static inline void __netif_tx_lock(struct netdev_queue *txq, void *curr)
 {
 	spin_lock(&txq->_xmit_lock);
-	txq->xmit_lock_owner = cpu;
+	txq->xmit_lock_owner = curr;
 }
 
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
 	spin_lock_bh(&txq->_xmit_lock);
-	txq->xmit_lock_owner = raw_smp_processor_id();
+	txq->xmit_lock_owner = (void *)current;
 }
 
 static inline int __netif_tx_trylock(struct netdev_queue *txq)
 {
 	int ok = spin_trylock(&txq->_xmit_lock);
 	if (likely(ok))
-		txq->xmit_lock_owner = raw_smp_processor_id();
+		txq->xmit_lock_owner = (void *)current;
 	return ok;
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	txq->xmit_lock_owner = (void *)-1;
 	spin_unlock(&txq->_xmit_lock);
 }
 
 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	txq->xmit_lock_owner = (void *)-1;
 	spin_unlock_bh(&txq->_xmit_lock);
 }
 
 static inline void txq_trans_update(struct netdev_queue *txq)
 {
-	if (txq->xmit_lock_owner != -1)
+	if (txq->xmit_lock_owner != (void *)-1)
 		txq->trans_start = jiffies;
 }
 
@@ -1828,10 +1828,10 @@ static inline void txq_trans_update(struct netdev_queue *txq)
 static inline void netif_tx_lock(struct net_device *dev)
 {
 	unsigned int i;
-	int cpu;
+	void *curr;
 
 	spin_lock(&dev->tx_global_lock);
-	cpu = raw_smp_processor_id();
+	curr = (void *)current;
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 
@@ -1841,7 +1841,7 @@ static inline void netif_tx_lock(struct net_device *dev)
 		 * the ->hard_start_xmit() handler and already
 		 * checked the frozen bit.
 		 */
-		__netif_tx_lock(txq, cpu);
+		__netif_tx_lock(txq, curr);
 		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
 		__netif_tx_unlock(txq);
 	}
@@ -1876,9 +1876,9 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 	local_bh_enable();
 }
 
-#define HARD_TX_LOCK(dev, txq, cpu) {			\
+#define HARD_TX_LOCK(dev, txq, curr) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		__netif_tx_lock(txq, cpu);		\
+		__netif_tx_lock(txq, curr);		\
 	}						\
 }
 
@@ -1891,14 +1891,14 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 static inline void netif_tx_disable(struct net_device *dev)
 {
 	unsigned int i;
-	int cpu;
+	void *curr;
 
 	local_bh_disable();
-	cpu = raw_smp_processor_id();
+	curr = (void *)current;
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 
-		__netif_tx_lock(txq, cpu);
+		__netif_tx_lock(txq, curr);
 		netif_tx_stop_queue(txq);
 		__netif_tx_unlock(txq);
 	}
diff --git a/net/core/dev.c b/net/core/dev.c
index 99ba080..90dd59b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2133,13 +2133,9 @@ gso:
 	/*
 	 * No need to check for recursion with threaded interrupts:
	 */
-#ifdef CONFIG_PREEMPT_RT
-	if (1) {
-#else
-	if (txq->xmit_lock_owner != cpu) {
-#endif
+	if (txq->xmit_lock_owner != (void *)current) {
 
-		HARD_TX_LOCK(dev, txq, cpu);
+		HARD_TX_LOCK(dev, txq, (void *)current);
 
 		if (!netif_tx_queue_stopped(txq)) {
 			rc = dev_hard_start_xmit(skb, dev, txq);
@@ -4989,7 +4985,7 @@ static void __netdev_init_queue_locks_one(struct net_device *dev,
 {
 	spin_lock_init(&dev_queue->_xmit_lock);
 	netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
-	dev_queue->xmit_lock_owner = -1;
+	dev_queue->xmit_lock_owner = (void *)-1;
 }
 
 static void netdev_init_queue_locks(struct net_device *dev)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 2fbd53b..1ef0746 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -75,7 +75,7 @@ static void queue_process(struct work_struct *work)
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 		local_irq_save_nort(flags);
-		__netif_tx_lock(txq, smp_processor_id());
+		__netif_tx_lock(txq, (void *)current);
 		if (netif_tx_queue_stopped(txq) ||
 		    netif_tx_queue_frozen(txq) ||
 		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 6d6a2ac..e90a2c6 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -79,7 +79,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 {
 	int ret;
 
-	if (unlikely(dev_queue->xmit_lock_owner == raw_smp_processor_id())) {
+	if (unlikely(dev_queue->xmit_lock_owner == (void *)current)) {
 		/*
 		 * Same CPU holding the lock. It may be a transient
 		 * configuration error, when hard_start_xmit() recurses. We
@@ -121,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	/* And release qdisc */
 	spin_unlock(root_lock);
 
-	HARD_TX_LOCK(dev, txq, raw_smp_processor_id());
+	HARD_TX_LOCK(dev, txq, (void *)current);
 	if (!netif_tx_queue_stopped(txq) &&
 	    !netif_tx_queue_frozen(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
-- 
1.7.0.4
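
Postscript: the sch_generic.c hunks above show only the ownership check
itself. In the handle_dev_cpu_collision() of this era the two branches
behave roughly as sketched below; with this patch applied, "Same CPU
holding the lock" in the retained comment effectively means "same task
holding the lock". The names qdisc_qlen() and dev_requeue_skb() come
from the surrounding kernel source of the time, not from the patch, so
treat this as an illustration only:

	if (unlikely(dev_queue->xmit_lock_owner == (void *)current)) {
		/*
		 * Dead loop: the transmitting task already holds this
		 * queue's lock.  Drop the skb and report the qdisc length
		 * so the caller goes on to try the next packet.
		 */
		kfree_skb(skb);
		ret = qdisc_qlen(q);
	} else {
		/* Someone else holds the lock: requeue and retry later. */
		ret = dev_requeue_skb(skb, q);
	}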