Commit a02bdd42 authored by Shahed Shaikh, committed by David S. Miller

qlcnic: Fix bug in Tx completion path

o The driver was using a common tx_clean_lock for all Tx queues. This
  patch adds a per-queue tx_clean_lock (a minimal sketch of the pattern
  follows the commit header).
o The driver was not updating sw_consumer while processing Tx completions
  when the interface is going down. Fixed in this patch (see the note
  after the qlcnic_process_cmd_ring hunk below).
Signed-off-by: Shahed Shaikh <shahed.shaikh@qlogic.com>
Signed-off-by: Manish Chopra <manish.chopra@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 0d68fc4f
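Note: below is a minimal sketch (not the driver code) of the per-queue
locking pattern the first bullet describes. The field names tx_clean_lock
and sw_consumer come from the diff; everything prefixed demo_ is
hypothetical and exists only for illustration.

    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Hypothetical trimmed-down Tx ring: only what the sketch needs. */
    struct demo_tx_ring {
    	spinlock_t tx_clean_lock;	/* per-queue cleanup lock, as in the patch */
    	u32 sw_consumer;
    };

    /*
     * Before the patch, a single adapter-wide tx_clean_lock serialized
     * completion cleanup across all Tx queues; after it, each ring owns
     * its lock, initialized where the rings are allocated.
     */
    static void demo_init_tx_rings(struct demo_tx_ring *rings, int num_rings)
    {
    	int i;

    	for (i = 0; i < num_rings; i++)
    		spin_lock_init(&rings[i].tx_clean_lock);
    }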
@@ -579,6 +579,8 @@ struct qlcnic_host_tx_ring {
 	dma_addr_t phys_addr;
 	dma_addr_t hw_cons_phys_addr;
 	struct netdev_queue *txq;
+	/* Lock to protect Tx descriptors cleanup */
+	spinlock_t tx_clean_lock;
 } ____cacheline_internodealigned_in_smp;
 
 /*
@@ -1095,7 +1097,6 @@ struct qlcnic_adapter {
 	struct qlcnic_filter_hash rx_fhash;
 	struct list_head vf_mc_list;
 
-	spinlock_t tx_clean_lock;
 	spinlock_t mac_learn_lock;
 	/* spinlock for catching rcv filters for eswitch traffic */
 	spinlock_t rx_mac_learn_lock;
...
@@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
 	struct qlcnic_skb_frag *buffrag;
 	int i, j;
 
+	spin_lock(&tx_ring->tx_clean_lock);
+
 	cmd_buf = tx_ring->cmd_buf_arr;
 	for (i = 0; i < tx_ring->num_desc; i++) {
 		buffrag = cmd_buf->frag_array;
@@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
 		}
 		cmd_buf++;
 	}
+
+	spin_unlock(&tx_ring->tx_clean_lock);
 }
 
 void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
...
@@ -782,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
 	struct net_device *netdev = adapter->netdev;
 	struct qlcnic_skb_frag *frag;
 
-	if (!spin_trylock(&adapter->tx_clean_lock))
+	if (!spin_trylock(&tx_ring->tx_clean_lock))
 		return 1;
 
 	sw_consumer = tx_ring->sw_consumer;
@@ -811,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
 			break;
 	}
 
+	tx_ring->sw_consumer = sw_consumer;
+
 	if (count && netif_running(netdev)) {
-		tx_ring->sw_consumer = sw_consumer;
 		smp_mb();
 		if (netif_tx_queue_stopped(tx_ring->txq) &&
 		    netif_carrier_ok(netdev)) {
@@ -838,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
 	 */
 	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
 	done = (sw_consumer == hw_consumer);
-	spin_unlock(&adapter->tx_clean_lock);
+
+	spin_unlock(&tx_ring->tx_clean_lock);
 
 	return done;
 }
...
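Note on the qlcnic_process_cmd_ring hunk above: while the interface is
going down, netif_running(netdev) returns false, so the pre-patch code,
which stored sw_consumer only inside that branch, left the ring's consumer
index stale. The sketch below (hypothetical demo_ names, reusing
struct demo_tx_ring from the earlier sketch; netif_running() is the
regular kernel helper) shows the fixed ordering: the store is
unconditional, and only the queue-wakeup logic stays behind the
netif_running() check.

    #include <linux/netdevice.h>

    static int demo_process_tx_completions(struct demo_tx_ring *ring,
    				       struct net_device *netdev,
    				       int count, u32 sw_consumer)
    {
    	/* ... completed descriptors have been walked, advancing sw_consumer ... */

    	ring->sw_consumer = sw_consumer;	/* unconditional, as in the patch */

    	if (count && netif_running(netdev)) {
    		/* waking a stopped Tx queue only makes sense while running */
    	}

    	return 0;
    }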
...@@ -1756,7 +1756,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) ...@@ -1756,7 +1756,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_sriov_vf_check(adapter)) if (qlcnic_sriov_vf_check(adapter))
qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb(); smp_mb();
spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev); netif_carrier_off(netdev);
adapter->ahw->linkup = 0; adapter->ahw->linkup = 0;
netif_tx_disable(netdev); netif_tx_disable(netdev);
...@@ -1777,7 +1776,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) ...@@ -1777,7 +1776,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->drv_tx_rings; ring++) for (ring = 0; ring < adapter->drv_tx_rings; ring++)
qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
spin_unlock(&adapter->tx_clean_lock);
} }
/* Usage: During suspend and firmware recovery module */ /* Usage: During suspend and firmware recovery module */
@@ -2172,6 +2170,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
 		}
 		memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
 		tx_ring->cmd_buf_arr = cmd_buf_arr;
+		spin_lock_init(&tx_ring->tx_clean_lock);
 	}
 
 	if (qlcnic_83xx_check(adapter) ||
@@ -2299,7 +2298,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	rwlock_init(&adapter->ahw->crb_lock);
 	mutex_init(&adapter->ahw->mem_lock);
 
-	spin_lock_init(&adapter->tx_clean_lock);
 	INIT_LIST_HEAD(&adapter->mac_list);
 
 	qlcnic_register_dcb(adapter);
...