Commit 00b70504, authored by Michael Chan, committed by David S. Miller

[TG3]: Convert to non-LLTX

Herbert Xu pointed out that it is unsafe to call netif_tx_disable()
from LLTX drivers because it uses dev->xmit_lock to synchronize
whereas LLTX drivers use private locks.

Convert tg3 to non-LLTX to fix this issue. tg3 is a lockless driver
where hard_start_xmit and tx completion handling can run concurrently
under normal conditions. A tx_lock is only needed to prevent
netif_stop_queue and netif_wake_queue race conditions when the queue
is full.

So whether we use LLTX or non-LLTX, it makes practically no
difference.
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent c71302d6
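Before the diff, a note on why tx_lock survives the conversion. The snippets below are an illustrative sketch of the stop/wake handshake only, not the literal driver code: ring setup and descriptor reclaim are elided, and the consumer side is modeled on the tg3_tx() completion path of this era.

/* Producer side: body of tg3_start_xmit(), entered with netif_tx_lock
 * held by the core (non-LLTX) and BHs disabled.
 */
if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
	spin_lock(&tp->tx_lock);
	netif_stop_queue(dev);
	/* Re-check under tx_lock: tg3_tx() may have reclaimed buffers
	 * between the test above and netif_stop_queue().  Without the
	 * lock, its wake-up could slip in just before the stop and be
	 * lost, leaving the queue stopped forever.
	 */
	if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
		netif_wake_queue(tp->dev);
	spin_unlock(&tp->tx_lock);
}

/* Consumer side: tail of tg3_tx(), run from tp->poll in softirq
 * context after completed descriptors have been reclaimed.
 */
if (netif_queue_stopped(tp->dev) &&
    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)) {
	spin_lock(&tp->tx_lock);
	if (netif_queue_stopped(tp->dev) &&
	    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
		netif_wake_queue(tp->dev);
	spin_unlock(&tp->tx_lock);
}

This stop/wake handshake is the full extent of the synchronization between the two paths; everything else in the hot path runs lockless, which is why the commit message can say the LLTX-to-non-LLTX switch makes practically no difference.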
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3759,14 +3759,11 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	len = skb_headlen(skb);
 
-	/* No BH disabling for tx_lock here.  We are running in BH disabled
-	 * context and TX reclaim runs via tp->poll inside of a software
+	/* We are running in BH disabled context with netif_tx_lock
+	 * and TX reclaim runs via tp->poll inside of a software
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (!spin_trylock(&tp->tx_lock))
-		return NETDEV_TX_LOCKED;
-
 	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
@@ -3775,7 +3772,6 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
 			       "queue awake!\n", dev->name);
 		}
-		spin_unlock(&tp->tx_lock);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -3858,15 +3854,16 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
+	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
+		spin_lock(&tp->tx_lock);
 		netif_stop_queue(dev);
 		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
+		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
 	mmiowb();
-	spin_unlock(&tp->tx_lock);
 
 	dev->trans_start = jiffies;
 
@@ -3885,14 +3882,11 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 
 	len = skb_headlen(skb);
 
-	/* No BH disabling for tx_lock here.  We are running in BH disabled
-	 * context and TX reclaim runs via tp->poll inside of a software
+	/* We are running in BH disabled context with netif_tx_lock
+	 * and TX reclaim runs via tp->poll inside of a software
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (!spin_trylock(&tp->tx_lock))
-		return NETDEV_TX_LOCKED;
-
 	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
@@ -3901,7 +3895,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
 			       "queue awake!\n", dev->name);
 		}
-		spin_unlock(&tp->tx_lock);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -4039,15 +4032,16 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
+	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
+		spin_lock(&tp->tx_lock);
 		netif_stop_queue(dev);
 		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
+		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
 	mmiowb();
-	spin_unlock(&tp->tx_lock);
 
 	dev->trans_start = jiffies;
 
@@ -11284,7 +11278,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	SET_MODULE_OWNER(dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	dev->features |= NETIF_F_LLTX;
 #if TG3_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 	dev->vlan_rx_register = tg3_vlan_rx_register;
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2074,12 +2074,22 @@ struct tg3 {
 
 	/* SMP locking strategy:
 	 *
-	 * lock: Held during all operations except TX packet
-	 *       processing.
+	 * lock: Held during reset, PHY access, timer, and when
+	 *       updating tg3_flags and tg3_flags2.
 	 *
-	 * tx_lock: Held during tg3_start_xmit and tg3_tx
+	 * tx_lock: Held during tg3_start_xmit and tg3_tx only
+	 *          when calling netif_[start|stop]_queue.
+	 *          tg3_start_xmit is protected by netif_tx_lock.
 	 *
 	 * Both of these locks are to be held with BH safety.
+	 *
+	 * Because the IRQ handler, tg3_poll, and tg3_start_xmit
+	 * are running lockless, it is necessary to completely
+	 * quiesce the chip with tg3_netif_stop and tg3_full_lock
+	 * before reconfiguring the device.
+	 *
+	 * indirect_lock: Held when accessing registers indirectly
+	 *                with IRQ disabling.
 	 */
 	spinlock_t lock;
 	spinlock_t indirect_lock;
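The quiescing rule in the updated tg3.h comment corresponds to a usage pattern like the following sketch. tg3_netif_stop, tg3_full_lock, tg3_full_unlock, and tg3_netif_start are real helpers named by the comment itself; the body and the irq_sync argument shown are schematic, drawn from the driver of this era rather than from this diff.

/* Reconfiguration path: nothing in the IRQ handler, tg3_poll, or
 * tg3_start_xmit takes tp->lock, so the chip must be fully quiesced
 * before it is reprogrammed.
 */
tg3_netif_stop(tp);		/* stop the TX queue and NAPI polling */
tg3_full_lock(tp, 1);		/* tp->lock + tp->tx_lock, IRQs synced */

/* ... safe to reset and reconfigure the chip here ... */

tg3_full_unlock(tp);
tg3_netif_start(tp);		/* resume queueing and polling */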