Commit fd2ea0a7 authored by David S. Miller

net: Use queue aware tests throughout.

This effectively "flips the switch" by making the core networking
and multiqueue-aware drivers use the new TX multiqueue structures.

Non-multiqueue drivers need no changes.  The interfaces they use, such
as netif_stop_queue(), degenerate into an operation on TX queue zero.
So everything "just works" for them.
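For illustration, this is all a single-queue wrapper does now — it acts
on TX queue zero, matching the netdevice.h hunks in the patch below:

	static inline void netif_stop_queue(struct net_device *dev)
	{
		netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
	}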

Code that really wants to do "X" to all TX queues now invokes a
routine that does so, such as netif_tx_wake_all_queues(),
netif_tx_stop_all_queues(), etc.
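Each of these helpers is just a loop over every allocated TX queue;
netif_tx_stop_all_queues(), as added to linux/netdevice.h below, reads:

	static inline void netif_tx_stop_all_queues(struct net_device *dev)
	{
		unsigned int i;

		for (i = 0; i < dev->num_tx_queues; i++) {
			struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
			netif_tx_stop_queue(txq);
		}
	}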

pktgen and netpoll required a little bit more surgery than the others.

In particular the pktgen changes, whilst functional, leave considerable
room for improvement.  The initial check in pktgen_xmit() will sometimes
test the wrong queue, which is mostly harmless.  The right fix is
probably to invoke fill_packet() earlier.
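Condensed from the pktgen hunk below, the queue is now resolved before
the stopped test; the initial choice can go stale because fill_packet()
may change the skb's queue mapping later:

	if (!pkt_dev->skb) {
		/* No packet built yet: advance/randomize the queue map. */
		set_cur_queue_map(pkt_dev);
		queue_map = pkt_dev->cur_queue_map;
	} else {
		queue_map = skb_get_queue_mapping(pkt_dev->skb);
	}
	txq = netdev_get_tx_queue(odev, queue_map);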

The bulk of the netpoll changes makes the code operate solely on the
TX queue indicated by the SKB queue mapping.
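For example, queue_process() now looks up, locks, and tests the skb's
own queue (condensed from the netpoll hunk below):

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	__netif_tx_lock(txq, smp_processor_id());
	if (netif_tx_queue_stopped(txq) ||
	    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK)
		skb_queue_head(&npinfo->txq, skb);	/* requeue, retry later */
	__netif_tx_unlock(txq);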

Setting of the SKB queue mapping is entirely confined inside of
net/core/dev.c:dev_pick_tx().  If we end up needing any kind of
special semantics (drops, for example) it will be implemented here.
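The whole policy today is the trivial queue-zero mapping, as the patch
adds it:

	static struct netdev_queue *dev_pick_tx(struct net_device *dev,
						struct sk_buff *skb)
	{
		u16 queue_index = 0;

		skb_set_queue_mapping(skb, queue_index);
		return netdev_get_tx_queue(dev, queue_index);
	}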

Finally, we now have a "real_num_tx_queues" which is where the driver
indicates how many TX queues are actually active.
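alloc_netdev_mq() starts both counts equal, and a driver lowers the
active count when interrupt resources come up short, as igb and ixgbe
do below after MSI-X setup:

	dev->num_tx_queues = queue_count;
	dev->real_num_tx_queues = queue_count;

	/* driver side, after interrupt setup: */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;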

With IGB changes from Jeff Kirsher.
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 24344d26
@@ -544,7 +544,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
 		spin_unlock(&priv->rx_lock);
 		netif_rx_complete(priv->dev, napi);
-		netif_stop_queue(priv->dev);
+		netif_tx_stop_all_queues(priv->dev);
 		napi_disable(&priv->napi);
 		atomic_inc(&priv->reset_pending);
@@ -750,9 +750,7 @@ static void cpmac_hw_error(struct work_struct *work)
 	barrier();
 	atomic_dec(&priv->reset_pending);
-	for (i = 0; i < CPMAC_QUEUES; i++)
-		netif_wake_subqueue(priv->dev, i);
-	netif_wake_queue(priv->dev);
+	netif_tx_wake_all_queues(priv->dev);
 	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
 }
@@ -781,7 +779,7 @@ static void cpmac_check_status(struct net_device *dev)
 			dev->name, tx_code, tx_channel, macstatus);
 	}
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 	cpmac_hw_stop(dev);
 	if (schedule_work(&priv->reset_work))
 		atomic_inc(&priv->reset_pending);
@@ -842,9 +840,7 @@ static void cpmac_tx_timeout(struct net_device *dev)
 	barrier();
 	atomic_dec(&priv->reset_pending);
-	netif_wake_queue(priv->dev);
-	for (i = 0; i < CPMAC_QUEUES; i++)
-		netif_wake_subqueue(dev, i);
+	netif_tx_wake_all_queues(priv->dev);
 }

 static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -935,7 +931,7 @@ static void cpmac_adjust_link(struct net_device *dev)
 	spin_lock(&priv->lock);
 	if (priv->phy->link) {
-		netif_start_queue(dev);
+		netif_tx_start_all_queues(dev);
 		if (priv->phy->duplex != priv->oldduplex) {
 			new_state = 1;
 			priv->oldduplex = priv->phy->duplex;
@@ -949,10 +945,10 @@ static void cpmac_adjust_link(struct net_device *dev)
 		if (!priv->oldlink) {
 			new_state = 1;
 			priv->oldlink = 1;
-			netif_schedule(dev);
+			netif_tx_schedule_all(dev);
 		}
 	} else if (priv->oldlink) {
-		netif_stop_queue(dev);
+		netif_tx_stop_all_queues(dev);
 		new_state = 1;
 		priv->oldlink = 0;
 		priv->oldspeed = 0;
@@ -1072,7 +1068,7 @@ static int cpmac_stop(struct net_device *dev)
 	struct cpmac_priv *priv = netdev_priv(dev);
 	struct resource *mem;

-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 	cancel_work_sync(&priv->reset_work);
 	napi_disable(&priv->napi);
...
@@ -533,7 +533,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
 	adapter->flags |= IGB_FLAG_HAS_MSI;

 	/* Notify the stack of the (possibly) reduced Tx Queue count. */
-	adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
+	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
 	return;
 }
@@ -821,9 +821,7 @@ void igb_down(struct igb_adapter *adapter)
 	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
 	/* flush and sleep below */

-	netif_stop_queue(netdev);
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		netif_stop_subqueue(netdev, i);
+	netif_tx_stop_all_queues(netdev);

 	/* disable transmits in the hardware */
 	tctl = rd32(E1000_TCTL);
@@ -1266,9 +1264,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	/* tell the stack to leave us alone until igb_open() is called */
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		netif_stop_subqueue(netdev, i);
+	netif_tx_stop_all_queues(netdev);

 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
@@ -2315,7 +2311,6 @@ static void igb_watchdog_task(struct work_struct *work)
 	struct e1000_mac_info *mac = &adapter->hw.mac;
 	u32 link;
 	s32 ret_val;
-	int i;

 	if ((netif_carrier_ok(netdev)) &&
 	    (rd32(E1000_STATUS) & E1000_STATUS_LU))
@@ -2371,9 +2366,7 @@ static void igb_watchdog_task(struct work_struct *work)
 			}

 			netif_carrier_on(netdev);
-			netif_wake_queue(netdev);
-			for (i = 0; i < adapter->num_tx_queues; i++)
-				netif_wake_subqueue(netdev, i);
+			netif_tx_wake_all_queues(netdev);

 			if (!test_bit(__IGB_DOWN, &adapter->state))
 				mod_timer(&adapter->phy_info_timer,
@@ -2385,9 +2378,7 @@ static void igb_watchdog_task(struct work_struct *work)
 			adapter->link_duplex = 0;
 			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
 			netif_carrier_off(netdev);
-			netif_stop_queue(netdev);
-			for (i = 0; i < adapter->num_tx_queues; i++)
-				netif_stop_subqueue(netdev, i);
+			netif_tx_stop_all_queues(netdev);

 			if (!test_bit(__IGB_DOWN, &adapter->state))
 				mod_timer(&adapter->phy_info_timer,
 					  round_jiffies(jiffies + 2 * HZ));
...
@@ -252,16 +252,10 @@ static int ixgbe_set_tso(struct net_device *netdev, u32 data)
 		netdev->features |= NETIF_F_TSO;
 		netdev->features |= NETIF_F_TSO6;
 	} else {
-		struct ixgbe_adapter *adapter = netdev_priv(netdev);
-		int i;
-		netif_stop_queue(netdev);
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			netif_stop_subqueue(netdev, i);
+		netif_tx_stop_all_queues(netdev);
 		netdev->features &= ~NETIF_F_TSO;
 		netdev->features &= ~NETIF_F_TSO6;
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			netif_start_subqueue(netdev, i);
-		netif_start_queue(netdev);
+		netif_tx_start_all_queues(netdev);
 	}
 	return 0;
 }
...
@@ -2013,7 +2013,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	del_timer_sync(&adapter->watchdog_timer);

 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
+	netif_tx_stop_all_queues(netdev);

 	if (!pci_channel_offline(adapter->pdev))
 		ixgbe_reset(adapter);
@@ -2359,7 +2359,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
 out:
 	/* Notify the stack of the (possibly) reduced Tx Queue count. */
-	adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
+	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;

 	return err;
 }
@@ -2896,7 +2896,6 @@ static void ixgbe_watchdog(unsigned long data)
 	struct net_device *netdev = adapter->netdev;
 	bool link_up;
 	u32 link_speed = 0;
-	int i;

 	adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);
@@ -2917,9 +2916,7 @@ static void ixgbe_watchdog(unsigned long data)
 				 (FLOW_TX ? "TX" : "None"))));

 			netif_carrier_on(netdev);
-			netif_wake_queue(netdev);
-			for (i = 0; i < adapter->num_tx_queues; i++)
-				netif_wake_subqueue(netdev, i);
+			netif_tx_wake_all_queues(netdev);
 		} else {
 			/* Force detection of hung controller */
 			adapter->detect_tx_hung = true;
@@ -2928,7 +2925,7 @@ static void ixgbe_watchdog(unsigned long data)
 		if (netif_carrier_ok(netdev)) {
 			DPRINTK(LINK, INFO, "NIC Link is Down\n");
 			netif_carrier_off(netdev);
-			netif_stop_queue(netdev);
+			netif_tx_stop_all_queues(netdev);
 		}
 	}
@@ -3631,9 +3628,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	ixgbe_start_hw(hw);

 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		netif_stop_subqueue(netdev, i);
+	netif_tx_stop_all_queues(netdev);

 	ixgbe_napi_add_all(adapter);
...
@@ -545,63 +545,53 @@ static struct pci_driver s2io_driver = {
 /* netqueue manipulation helper functions */
 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
 {
-	int i;
-	if (sp->config.multiq) {
-		for (i = 0; i < sp->config.tx_fifo_num; i++)
-			netif_stop_subqueue(sp->dev, i);
-	} else {
+	if (!sp->config.multiq) {
+		int i;
+
 		for (i = 0; i < sp->config.tx_fifo_num; i++)
 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
-		netif_stop_queue(sp->dev);
 	}
+	netif_tx_stop_all_queues(sp->dev);
 }

 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
 {
-	if (sp->config.multiq)
-		netif_stop_subqueue(sp->dev, fifo_no);
-	else {
+	if (!sp->config.multiq)
 		sp->mac_control.fifos[fifo_no].queue_state =
 			FIFO_QUEUE_STOP;
-		netif_stop_queue(sp->dev);
-	}
+
+	netif_tx_stop_all_queues(sp->dev);
 }

 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
 {
-	int i;
-	if (sp->config.multiq) {
-		for (i = 0; i < sp->config.tx_fifo_num; i++)
-			netif_start_subqueue(sp->dev, i);
-	} else {
+	if (!sp->config.multiq) {
+		int i;
+
 		for (i = 0; i < sp->config.tx_fifo_num; i++)
 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
-		netif_start_queue(sp->dev);
 	}
+	netif_tx_start_all_queues(sp->dev);
 }

 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
 {
-	if (sp->config.multiq)
-		netif_start_subqueue(sp->dev, fifo_no);
-	else {
+	if (!sp->config.multiq)
 		sp->mac_control.fifos[fifo_no].queue_state =
 			FIFO_QUEUE_START;
-		netif_start_queue(sp->dev);
-	}
+
+	netif_tx_start_all_queues(sp->dev);
 }

 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
 {
-	int i;
-	if (sp->config.multiq) {
-		for (i = 0; i < sp->config.tx_fifo_num; i++)
-			netif_wake_subqueue(sp->dev, i);
-	} else {
+	if (!sp->config.multiq) {
+		int i;
+
 		for (i = 0; i < sp->config.tx_fifo_num; i++)
 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
-		netif_wake_queue(sp->dev);
 	}
+	netif_tx_wake_all_queues(sp->dev);
 }

 static inline void s2io_wake_tx_queue(
@@ -8691,5 +8681,5 @@ static void s2io_io_resume(struct pci_dev *pdev)
 	}

 	netif_device_attach(netdev);
-	netif_wake_queue(netdev);
+	netif_tx_wake_all_queues(netdev);
 }
@@ -642,7 +642,13 @@ struct net_device
 	struct netdev_queue	rx_queue;

 	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
+
+	/* Number of TX queues allocated at alloc_netdev_mq() time  */
 	unsigned int		num_tx_queues;
+
+	/* Number of TX queues currently active in device  */
+	unsigned int		real_num_tx_queues;
+
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */

 	/*
@@ -1000,6 +1006,14 @@ static inline void netif_schedule(struct net_device *dev)
 	netif_schedule_queue(netdev_get_tx_queue(dev, 0));
 }

+static inline void netif_tx_schedule_all(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++)
+		netif_schedule_queue(netdev_get_tx_queue(dev, i));
+}
+
 /**
  *	netif_start_queue - allow transmit
  *	@dev: network device
@@ -1016,6 +1030,16 @@ static inline void netif_start_queue(struct net_device *dev)
 	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
 }

+static inline void netif_tx_start_all_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_start_queue(txq);
+	}
+}
+
 /**
  *	netif_wake_queue - restart transmit
  *	@dev: network device
@@ -1040,6 +1064,16 @@ static inline void netif_wake_queue(struct net_device *dev)
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
 }

+static inline void netif_tx_wake_all_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_wake_queue(txq);
+	}
+}
+
 /**
  *	netif_stop_queue - stop transmitted packets
  *	@dev: network device
@@ -1057,6 +1091,16 @@ static inline void netif_stop_queue(struct net_device *dev)
 	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
 }

+static inline void netif_tx_stop_all_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_stop_queue(txq);
+	}
+}
+
 /**
  *	netif_queue_stopped - test if transmit queue is flowblocked
  *	@dev: network device
@@ -1100,7 +1144,8 @@ static inline int netif_running(const struct net_device *dev)
  */
 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 {
-	clear_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+	clear_bit(__QUEUE_STATE_XOFF, &txq->state);
 }

 /**
@@ -1112,11 +1157,12 @@ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
  */
 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 {
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 #ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap())
 		return;
 #endif
-	set_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+	set_bit(__QUEUE_STATE_XOFF, &txq->state);
 }

 /**
@@ -1129,8 +1175,8 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 static inline int __netif_subqueue_stopped(const struct net_device *dev,
 					   u16 queue_index)
 {
-	return test_bit(__QUEUE_STATE_XOFF,
-			&dev->egress_subqueue[queue_index].state);
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+	return test_bit(__QUEUE_STATE_XOFF, &txq->state);
 }

 static inline int netif_subqueue_stopped(const struct net_device *dev,
@@ -1148,13 +1194,13 @@ static inline int netif_subqueue_stopped(const struct net_device *dev,
  */
 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 {
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap())
 		return;
 #endif
-	if (test_and_clear_bit(__QUEUE_STATE_XOFF,
-			       &dev->egress_subqueue[queue_index].state))
-		__netif_schedule(netdev_get_tx_queue(dev, 0));
+	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+		__netif_schedule(txq);
 }

 /**
@@ -1198,7 +1244,8 @@ extern int		dev_set_mtu(struct net_device *, int);
 extern int		dev_set_mac_address(struct net_device *,
 					    struct sockaddr *);
 extern int		dev_hard_start_xmit(struct sk_buff *skb,
-					    struct net_device *dev);
+					    struct net_device *dev,
+					    struct netdev_queue *txq);

 extern int		netdev_budget;
@@ -1447,6 +1494,12 @@ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 	txq->xmit_lock_owner = cpu;
 }

+static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+{
+	spin_lock_bh(&txq->_xmit_lock);
+	txq->xmit_lock_owner = smp_processor_id();
+}
+
 static inline void netif_tx_lock(struct net_device *dev)
 {
 	int cpu = smp_processor_id();
@@ -1483,6 +1536,12 @@ static inline void __netif_tx_unlock(struct netdev_queue *txq)
 	spin_unlock(&txq->_xmit_lock);
 }

+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+	spin_unlock_bh(&txq->_xmit_lock);
+}
+
 static inline void netif_tx_unlock(struct net_device *dev)
 {
 	unsigned int i;
@@ -1514,8 +1573,13 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)

 static inline void netif_tx_disable(struct net_device *dev)
 {
+	unsigned int i;
+
 	netif_tx_lock_bh(dev);
-	netif_stop_queue(dev);
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_stop_queue(txq);
+	}
 	netif_tx_unlock_bh(dev);
 }
...
@@ -88,9 +88,7 @@ extern void __qdisc_run(struct netdev_queue *txq);

 static inline void qdisc_run(struct netdev_queue *txq)
 {
-	struct net_device *dev = txq->dev;
-
-	if (!netif_queue_stopped(dev) &&
+	if (!netif_tx_queue_stopped(txq) &&
 	    !test_and_set_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state))
 		__qdisc_run(txq);
 }
...
@@ -1598,7 +1598,8 @@ static int dev_gso_segment(struct sk_buff *skb)
 	return 0;
 }

-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+			struct netdev_queue *txq)
 {
 	if (likely(!skb->next)) {
 		if (!list_empty(&ptype_all))
@@ -1627,9 +1628,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			skb->next = nskb;
 			return rc;
 		}
-		if (unlikely((netif_queue_stopped(dev) ||
-			     netif_subqueue_stopped(dev, skb)) &&
-			     skb->next))
+		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
 			return NETDEV_TX_BUSY;
 	} while (skb->next);
@@ -1669,7 +1668,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
-	return netdev_get_tx_queue(dev, 0);
+	u16 queue_index = 0;
+
+	skb_set_queue_mapping(skb, queue_index);
+	return netdev_get_tx_queue(dev, queue_index);
 }

 int dev_queue_xmit(struct sk_buff *skb)
@@ -1737,8 +1739,6 @@ int dev_queue_xmit(struct sk_buff *skb)
 	spin_lock(&txq->lock);
 	q = txq->qdisc;
 	if (q->enqueue) {
-		/* reset queue_mapping to zero */
-		skb_set_queue_mapping(skb, 0);
 		rc = q->enqueue(skb, q);
 		qdisc_run(txq);
 		spin_unlock(&txq->lock);
@@ -1768,10 +1768,9 @@ int dev_queue_xmit(struct sk_buff *skb)

 			HARD_TX_LOCK(dev, txq, cpu);

-			if (!netif_queue_stopped(dev) &&
-			    !netif_subqueue_stopped(dev, skb)) {
+			if (!netif_tx_queue_stopped(txq)) {
 				rc = 0;
-				if (!dev_hard_start_xmit(skb, dev)) {
+				if (!dev_hard_start_xmit(skb, dev, txq)) {
 					HARD_TX_UNLOCK(dev, txq);
 					goto out;
 				}
@@ -4160,8 +4159,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,

 	BUG_ON(strlen(name) >= sizeof(dev->name));

-	alloc_size = sizeof(struct net_device) +
-		     sizeof(struct net_device_subqueue) * (queue_count - 1);
+	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
 		/* ensure 32-byte alignment of private area */
 		alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
@@ -4191,16 +4189,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,

 	dev->_tx = tx;
 	dev->num_tx_queues = queue_count;
+	dev->real_num_tx_queues = queue_count;

 	if (sizeof_priv) {
 		dev->priv = ((char *)dev +
-			     ((sizeof(struct net_device) +
-			       (sizeof(struct net_device_subqueue) *
-				(queue_count - 1)) + NETDEV_ALIGN_CONST)
+			     ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
 			      & ~NETDEV_ALIGN_CONST));
 	}

-	dev->egress_subqueue_count = queue_count;
 	dev->gso_max_size = GSO_MAX_SIZE;

 	netdev_init_queues(dev);
...
@@ -58,25 +58,27 @@ static void queue_process(struct work_struct *work)

 	while ((skb = skb_dequeue(&npinfo->txq))) {
 		struct net_device *dev = skb->dev;
+		struct netdev_queue *txq;

 		if (!netif_device_present(dev) || !netif_running(dev)) {
 			__kfree_skb(skb);
 			continue;
 		}

+		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
 		local_irq_save(flags);
-		netif_tx_lock(dev);
-		if ((netif_queue_stopped(dev) ||
-		     netif_subqueue_stopped(dev, skb)) ||
-		     dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+		__netif_tx_lock(txq, smp_processor_id());
+		if (netif_tx_queue_stopped(txq) ||
+		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
-			netif_tx_unlock(dev);
+			__netif_tx_unlock(txq);
 			local_irq_restore(flags);

 			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
-		netif_tx_unlock(dev);
+		__netif_tx_unlock(txq);
 		local_irq_restore(flags);
 	}
 }
@@ -278,17 +280,19 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)

 	/* don't get messages out of order, and no recursion */
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
+		struct netdev_queue *txq;
 		unsigned long flags;

+		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
 		local_irq_save(flags);
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
-			if (netif_tx_trylock(dev)) {
-				if (!netif_queue_stopped(dev) &&
-				    !netif_subqueue_stopped(dev, skb))
+			if (__netif_tx_trylock(txq)) {
+				if (!netif_tx_queue_stopped(txq))
 					status = dev->hard_start_xmit(skb, dev);
-				netif_tx_unlock(dev);
+				__netif_tx_unlock(txq);

 				if (status == NETDEV_TX_OK)
 					break;
...
@@ -2123,6 +2123,24 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
 	}
 }
 #endif
+static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
+{
+	if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
+		__u16 t;
+		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
+			t = random32() %
+				(pkt_dev->queue_map_max -
+				 pkt_dev->queue_map_min + 1)
+				+ pkt_dev->queue_map_min;
+		} else {
+			t = pkt_dev->cur_queue_map + 1;
+			if (t > pkt_dev->queue_map_max)
+				t = pkt_dev->queue_map_min;
+		}
+		pkt_dev->cur_queue_map = t;
+	}
+}
+
 /* Increment/randomize headers according to flags and current values
  * for IP src/dest, UDP src/dst port, MAC-Addr src/dst
 */
@@ -2325,19 +2343,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 		pkt_dev->cur_pkt_size = t;
 	}

-	if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
-		__u16 t;
-		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
-			t = random32() %
-				(pkt_dev->queue_map_max - pkt_dev->queue_map_min + 1)
-				+ pkt_dev->queue_map_min;
-		} else {
-			t = pkt_dev->cur_queue_map + 1;
-			if (t > pkt_dev->queue_map_max)
-				t = pkt_dev->queue_map_min;
-		}
-		pkt_dev->cur_queue_map = t;
-	}
+	set_cur_queue_map(pkt_dev);

 	pkt_dev->flows[flow].count++;
 }
@@ -2458,7 +2464,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
 	__be16 *svlan_tci = NULL;                 /* Encapsulates priority and SVLAN ID */
 	__be16 *svlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for SVLAN tag */
-
+	u16 queue_map;

 	if (pkt_dev->nr_labels)
 		protocol = htons(ETH_P_MPLS_UC);
@@ -2469,6 +2475,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
+	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);

 	datalen = (odev->hard_header_len + 16) & ~0xf;
@@ -2507,7 +2514,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	skb->network_header = skb->tail;
 	skb->transport_header = skb->network_header + sizeof(struct iphdr);
 	skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
-	skb_set_queue_mapping(skb, pkt_dev->cur_queue_map);
+	skb_set_queue_mapping(skb, queue_map);

 	iph = ip_hdr(skb);
 	udph = udp_hdr(skb);
@@ -2797,6 +2804,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
 	__be16 *svlan_tci = NULL;                 /* Encapsulates priority and SVLAN ID */
 	__be16 *svlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for SVLAN tag */
+	u16 queue_map;

 	if (pkt_dev->nr_labels)
 		protocol = htons(ETH_P_MPLS_UC);
@@ -2807,6 +2815,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
+	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);

 	skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 +
@@ -2844,7 +2853,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	skb->network_header = skb->tail;
 	skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
 	skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
-	skb_set_queue_mapping(skb, pkt_dev->cur_queue_map);
+	skb_set_queue_mapping(skb, queue_map);

 	iph = ipv6_hdr(skb);
 	udph = udp_hdr(skb);
@@ -3263,7 +3272,9 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
 static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
 	struct net_device *odev = NULL;
+	struct netdev_queue *txq;
 	__u64 idle_start = 0;
+	u16 queue_map;
 	int ret;

 	odev = pkt_dev->odev;
@@ -3285,9 +3296,15 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		}
 	}

-	if ((netif_queue_stopped(odev) ||
-	     (pkt_dev->skb &&
-	      netif_subqueue_stopped(odev, pkt_dev->skb))) ||
+	if (!pkt_dev->skb) {
+		set_cur_queue_map(pkt_dev);
+		queue_map = pkt_dev->cur_queue_map;
+	} else {
+		queue_map = skb_get_queue_mapping(pkt_dev->skb);
+	}
+
+	txq = netdev_get_tx_queue(odev, queue_map);
+	if (netif_tx_queue_stopped(txq) ||
 	    need_resched()) {
 		idle_start = getCurUs();
@@ -3303,8 +3320,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)

 		pkt_dev->idle_acc += getCurUs() - idle_start;

-		if (netif_queue_stopped(odev) ||
-		    netif_subqueue_stopped(odev, pkt_dev->skb)) {
+		if (netif_tx_queue_stopped(txq)) {
 			pkt_dev->next_tx_us = getCurUs();	/* TODO */
 			pkt_dev->next_tx_ns = 0;
 			goto out;	/* Try the next interface */
@@ -3331,9 +3347,12 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		}
 	}

-	netif_tx_lock_bh(odev);
-	if (!netif_queue_stopped(odev) &&
-	    !netif_subqueue_stopped(odev, pkt_dev->skb)) {
+	/* fill_packet() might have changed the queue */
+	queue_map = skb_get_queue_mapping(pkt_dev->skb);
+	txq = netdev_get_tx_queue(odev, queue_map);
+
+	__netif_tx_lock_bh(txq);
+	if (!netif_tx_queue_stopped(txq)) {
 		atomic_inc(&(pkt_dev->skb->users));
 retry_now:
@@ -3377,7 +3396,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->next_tx_ns = 0;
 	}

-	netif_tx_unlock_bh(odev);
+	__netif_tx_unlock_bh(txq);

 	/* If pkt_dev->count is zero, then run forever */
 	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
...
@@ -166,7 +166,7 @@ static inline int qdisc_restart(struct netdev_queue *txq)

 	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
-		ret = dev_hard_start_xmit(skb, dev);
+		ret = dev_hard_start_xmit(skb, dev, txq);
 	HARD_TX_UNLOCK(dev, txq);

 	spin_lock(&txq->lock);
@@ -198,11 +198,10 @@ static inline int qdisc_restart(struct netdev_queue *txq)

 void __qdisc_run(struct netdev_queue *txq)
 {
-	struct net_device *dev = txq->dev;
 	unsigned long start_time = jiffies;

 	while (qdisc_restart(txq)) {
-		if (netif_queue_stopped(dev))
+		if (netif_tx_queue_stopped(txq))
 			break;

 		/*
...
@@ -295,8 +295,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 		slave_txq = netdev_get_tx_queue(slave, 0);
 		if (slave_txq->qdisc_sleeping != q)
 			continue;
-		if (netif_queue_stopped(slave) ||
-		    __netif_subqueue_stopped(slave, subq) ||
+		if (__netif_subqueue_stopped(slave, subq) ||
 		    !netif_running(slave)) {
 			busy = 1;
 			continue;
@@ -305,8 +304,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 		switch (teql_resolve(skb, skb_res, slave)) {
 		case 0:
 			if (netif_tx_trylock(slave)) {
-				if (!netif_queue_stopped(slave) &&
-				    !__netif_subqueue_stopped(slave, subq) &&
+				if (!__netif_subqueue_stopped(slave, subq) &&
 				    slave->hard_start_xmit(skb, slave) == 0) {
 					netif_tx_unlock(slave);
 					master->slaves = NEXT_SLAVE(q);
...