Commit 7a9905e6 authored by Rajesh Borundia, committed by David S. Miller

netxen: fix race in tx stop queue

There is a race between netif_stop_queue and the netif_queue_stopped
check. So check once again whether buffers are available, to avoid the race.
With the above logic we can also get rid of the tx lock in process_cmd_ring.
Signed-off-by: Rajesh Borundia <rajesh.borundia@qlogic.com>
Signed-off-by: Amit Kumar Salecha <amit.salecha@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 3666e0b0
...@@ -175,7 +175,10 @@ ...@@ -175,7 +175,10 @@
#define MAX_NUM_CARDS 4 #define MAX_NUM_CARDS 4
#define MAX_BUFFERS_PER_CMD 32 #define MAX_BUFFERS_PER_CMD 32
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4) #define MAX_TSO_HEADER_DESC 2
#define MGMT_CMD_DESC_RESV 4
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ MGMT_CMD_DESC_RESV)
#define NX_MAX_TX_TIMEOUTS 2 #define NX_MAX_TX_TIMEOUTS 2
/* /*
......
...@@ -598,8 +598,14 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter, ...@@ -598,8 +598,14 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
if (nr_desc >= netxen_tx_avail(tx_ring)) { if (nr_desc >= netxen_tx_avail(tx_ring)) {
netif_tx_stop_queue(tx_ring->txq); netif_tx_stop_queue(tx_ring->txq);
__netif_tx_unlock_bh(tx_ring->txq); smp_mb();
return -EBUSY; if (netxen_tx_avail(tx_ring) > nr_desc) {
if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
netif_tx_wake_queue(tx_ring->txq);
} else {
__netif_tx_unlock_bh(tx_ring->txq);
return -EBUSY;
}
} }
do { do {
......
...@@ -1763,14 +1763,10 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) ...@@ -1763,14 +1763,10 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
smp_mb(); smp_mb();
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
__netif_tx_lock(tx_ring->txq, smp_processor_id()); if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) {
netif_wake_queue(netdev); netif_wake_queue(netdev);
adapter->tx_timeo_cnt = 0; adapter->tx_timeo_cnt = 0;
}
__netif_tx_unlock(tx_ring->txq);
}
} }
/* /*
* If everything is freed up to consumer then check if the ring is full * If everything is freed up to consumer then check if the ring is full
......
...@@ -125,11 +125,6 @@ netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, ...@@ -125,11 +125,6 @@ netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
struct nx_host_tx_ring *tx_ring) struct nx_host_tx_ring *tx_ring)
{ {
NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer); NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
if (netxen_tx_avail(tx_ring) <= TX_STOP_THRESH) {
netif_stop_queue(adapter->netdev);
smp_mb();
}
} }
static uint32_t crb_cmd_consumer[4] = { static uint32_t crb_cmd_consumer[4] = {
...@@ -1209,7 +1204,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter, ...@@ -1209,7 +1204,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
adapter->max_mc_count = 16; adapter->max_mc_count = 16;
netdev->netdev_ops = &netxen_netdev_ops; netdev->netdev_ops = &netxen_netdev_ops;
netdev->watchdog_timeo = 2*HZ; netdev->watchdog_timeo = 5*HZ;
netxen_nic_change_mtu(netdev, netdev->mtu); netxen_nic_change_mtu(netdev, netdev->mtu);
...@@ -1825,9 +1820,13 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -1825,9 +1820,13 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* 4 fragments per cmd des */ /* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2; no_of_desc = (frag_count + 3) >> 2;
if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) { if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
netif_stop_queue(netdev); netif_stop_queue(netdev);
return NETDEV_TX_BUSY; smp_mb();
if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
netif_start_queue(netdev);
else
return NETDEV_TX_BUSY;
} }
producer = tx_ring->producer; producer = tx_ring->producer;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register to post a comment