Commit 668f895a, authored by Pavel Emelyanov, committed by David S. Miller

[NET]: Hide the queue_mapping field inside netif_subqueue_stopped

Many places read the queue_mapping field from the skb just to pass it to
netif_subqueue_stopped(), and with multiqueue compiled out that field is
0 in any case.

Add a helper that works with the sk_buff directly.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 4e3ab47a
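The point of the patch is easiest to see in the helper pair alone: call sites stop dereferencing skb->queue_mapping themselves and hand the skb to a wrapper instead. Below is a minimal standalone sketch of that pattern; the struct bodies and the subqueue_stopped array are mock stand-ins for illustration, not the kernel's real definitions.

/*
 * Standalone sketch of the helper pair this patch introduces.
 * The struct bodies are mocks; the real kernel tracks per-queue
 * state inside struct net_device differently.
 */
#include <stdio.h>

struct sk_buff {
	unsigned short queue_mapping;	/* tx queue this skb is mapped to */
};

struct net_device {
	int subqueue_stopped[4];	/* mock: 1 = that tx queue is stopped */
};

static inline unsigned short skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

/* Raw-index variant, kept for callers that already hold the index
 * in a local variable (sch_teql does exactly that with "subq"). */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   unsigned short queue_index)
{
	return dev->subqueue_stopped[queue_index];
}

/* skb variant: the queue_mapping lookup is hidden here, so call
 * sites no longer touch skb->queue_mapping themselves. */
static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

int main(void)
{
	struct net_device dev = { .subqueue_stopped = { 0, 1, 0, 0 } };
	struct sk_buff skb = { .queue_mapping = 1 };

	/* Before: netif_subqueue_stopped(&dev, skb.queue_mapping)
	 * After:  netif_subqueue_stopped(&dev, &skb)              */
	printf("queue %hu stopped: %d\n", skb.queue_mapping,
	       netif_subqueue_stopped(&dev, &skb));
	return 0;
}

Any C99 compiler will build the sketch as-is; in the kernel tree the same two inlines live in the header changed by the first hunk below.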
include/linux/netdevice.h

@@ -996,7 +996,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
  *
  * Check individual transmit queue of a device with multiple transmit queues.
  */
-static inline int netif_subqueue_stopped(const struct net_device *dev,
+static inline int __netif_subqueue_stopped(const struct net_device *dev,
 					 u16 queue_index)
 {
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
@@ -1007,6 +1007,11 @@ static inline int netif_subqueue_stopped(const struct net_device *dev,
 #endif
 }
 
+static inline int netif_subqueue_stopped(const struct net_device *dev,
+					 struct sk_buff *skb)
+{
+	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
+}
+
 /**
  * netif_wake_subqueue - allow sending packets on subqueue
net/core/dev.c

@@ -1553,7 +1553,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			return rc;
 		}
 		if (unlikely((netif_queue_stopped(dev) ||
-			     netif_subqueue_stopped(dev, skb->queue_mapping)) &&
+			     netif_subqueue_stopped(dev, skb)) &&
 			     skb->next))
 			return NETDEV_TX_BUSY;
 	} while (skb->next);
@@ -1692,7 +1692,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 			HARD_TX_LOCK(dev, cpu);
 
 			if (!netif_queue_stopped(dev) &&
-			    !netif_subqueue_stopped(dev, skb->queue_mapping)) {
+			    !netif_subqueue_stopped(dev, skb)) {
 				rc = 0;
 				if (!dev_hard_start_xmit(skb, dev)) {
 					HARD_TX_UNLOCK(dev);
net/core/netpoll.c

@@ -67,7 +67,7 @@ static void queue_process(struct work_struct *work)
 		local_irq_save(flags);
 		netif_tx_lock(dev);
 		if ((netif_queue_stopped(dev) ||
-		     netif_subqueue_stopped(dev, skb->queue_mapping)) ||
+		     netif_subqueue_stopped(dev, skb)) ||
 		     dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
 			netif_tx_unlock(dev);
@@ -269,7 +269,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 		     tries > 0; --tries) {
 			if (netif_tx_trylock(dev)) {
 				if (!netif_queue_stopped(dev) &&
-				    !netif_subqueue_stopped(dev, skb->queue_mapping))
+				    !netif_subqueue_stopped(dev, skb))
 					status = dev->hard_start_xmit(skb, dev);
 				netif_tx_unlock(dev);
net/core/pktgen.c

@@ -3383,7 +3383,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	if ((netif_queue_stopped(odev) ||
 	     (pkt_dev->skb &&
-	      netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) ||
+	      netif_subqueue_stopped(odev, pkt_dev->skb))) ||
 	    need_resched()) {
 		idle_start = getCurUs();
@@ -3400,7 +3400,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	pkt_dev->idle_acc += getCurUs() - idle_start;
 
 	if (netif_queue_stopped(odev) ||
-	    netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
+	    netif_subqueue_stopped(odev, pkt_dev->skb)) {
 		pkt_dev->next_tx_us = getCurUs();	/* TODO */
 		pkt_dev->next_tx_ns = 0;
 		goto out;	/* Try the next interface */
@@ -3429,7 +3429,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	netif_tx_lock_bh(odev);
 	if (!netif_queue_stopped(odev) &&
-	    !netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
+	    !netif_subqueue_stopped(odev, pkt_dev->skb)) {
 		atomic_inc(&(pkt_dev->skb->users));
 retry_now:
net/sched/sch_teql.c

@@ -284,7 +284,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (slave->qdisc_sleeping != q)
 			continue;
 		if (netif_queue_stopped(slave) ||
-		    netif_subqueue_stopped(slave, subq) ||
+		    __netif_subqueue_stopped(slave, subq) ||
 		    !netif_running(slave)) {
 			busy = 1;
 			continue;
@@ -294,7 +294,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 		case 0:
 			if (netif_tx_trylock(slave)) {
 				if (!netif_queue_stopped(slave) &&
-				    !netif_subqueue_stopped(slave, subq) &&
+				    !__netif_subqueue_stopped(slave, subq) &&
 				    slave->hard_start_xmit(skb, slave) == 0) {
 					netif_tx_unlock(slave);
 					master->slaves = NEXT_SLAVE(q);
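Note the asymmetry in this last file: teql_master_xmit() already holds the skb's queue index in the local variable subq while it probes each candidate slave device, so it is converted to the raw-index variant __netif_subqueue_stopped() instead of re-deriving the mapping from the skb once per slave.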