Commit d2c7ddd6 authored by David S. Miller

[NET]: Fix TX timeout regression in Intel drivers.

This fixes a regression added by changeset
53e52c72 ("[NET]: Make ->poll()
breakout consistent in Intel ethernet drivers.")

As pointed out by Jesse Brandeburg, for three of the drivers edited
above there is breakout logic in the *_clean_tx_irq() code to prevent
running TX reclaim forever.  If this occurs, we have to elide NAPI
poll completion or else those TX events will never be serviced.
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Parent d8c89eb3
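The fix applies the same pattern in all three drivers: *_clean_tx_irq() now returns whether it did (bounded) TX reclaim work, and the poll routine treats a non-zero return as having consumed the whole budget, so NAPI keeps polling instead of completing. Below is a minimal sketch of that pattern; the my_* names are hypothetical placeholders, not the drivers' actual symbols.

/* Sketch of the poll pattern this patch uses (hypothetical my_* names).
 * It mirrors the NAPI contract of this era: return how much of "budget"
 * was used; returning the full budget keeps the device on the poll list. */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_adapter *adapter = container_of(napi, struct my_adapter, napi);
	int tx_cleaned = 0, work_done = 0;

	/* TX reclaim may break out early to avoid running forever; it
	 * reports whether it cleaned anything this pass. */
	tx_cleaned = my_clean_tx(adapter);

	/* RX processing accounts its work against the budget. */
	my_clean_rx(adapter, &work_done, budget);

	/* If TX reclaim ran (and may have bailed out early), pretend the
	 * budget was fully consumed so we are polled again and the
	 * remaining TX events get serviced. */
	if (tx_cleaned)
		work_done = budget;

	/* Only complete NAPI when there is genuinely nothing left. */
	if (work_done < budget)
		netif_rx_complete(adapter->netdev, napi);

	return work_done;
}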
@@ -3919,7 +3919,7 @@ e1000_clean(struct napi_struct *napi, int budget)
 {
 	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
 	struct net_device *poll_dev = adapter->netdev;
-	int work_done = 0;
+	int tx_cleaned = 0, work_done = 0;
 
 	/* Must NOT use netdev_priv macro here. */
 	adapter = poll_dev->priv;
@@ -3929,14 +3929,17 @@ e1000_clean(struct napi_struct *napi, int budget)
 	 * simultaneously.  A failure obtaining the lock means
 	 * tx_ring[0] is currently being cleaned anyway. */
 	if (spin_trylock(&adapter->tx_queue_lock)) {
-		e1000_clean_tx_irq(adapter,
-				   &adapter->tx_ring[0]);
+		tx_cleaned = e1000_clean_tx_irq(adapter,
+						&adapter->tx_ring[0]);
 		spin_unlock(&adapter->tx_queue_lock);
 	}
 
 	adapter->clean_rx(adapter, &adapter->rx_ring[0],
 			  &work_done, budget);
 
+	if (tx_cleaned)
+		work_done = budget;
+
 	/* If budget not fully consumed, exit the polling mode */
 	if (work_done < budget) {
 		if (likely(adapter->itr_setting & 3))
@@ -1384,7 +1384,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
 {
 	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
 	struct net_device *poll_dev = adapter->netdev;
-	int work_done = 0;
+	int tx_cleaned = 0, work_done = 0;
 
 	/* Must NOT use netdev_priv macro here. */
 	adapter = poll_dev->priv;
@@ -1394,12 +1394,15 @@ static int e1000_clean(struct napi_struct *napi, int budget)
 	 * simultaneously.  A failure obtaining the lock means
 	 * tx_ring is currently being cleaned anyway. */
 	if (spin_trylock(&adapter->tx_queue_lock)) {
-		e1000_clean_tx_irq(adapter);
+		tx_cleaned = e1000_clean_tx_irq(adapter);
 		spin_unlock(&adapter->tx_queue_lock);
 	}
 
 	adapter->clean_rx(adapter, &work_done, budget);
 
+	if (tx_cleaned)
+		work_done = budget;
+
 	/* If budget not fully consumed, exit the polling mode */
 	if (work_done < budget) {
 		if (adapter->itr_setting & 3)
@@ -1468,13 +1468,16 @@ static int ixgbe_clean(struct napi_struct *napi, int budget)
 	struct ixgbe_adapter *adapter = container_of(napi,
 						     struct ixgbe_adapter, napi);
 	struct net_device *netdev = adapter->netdev;
-	int work_done = 0;
+	int tx_cleaned = 0, work_done = 0;
 
 	/* In non-MSIX case, there is no multi-Tx/Rx queue */
-	ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
+	tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
 	ixgbe_clean_rx_irq(adapter, &adapter->rx_ring[0], &work_done,
 			   budget);
 
+	if (tx_cleaned)
+		work_done = budget;
+
 	/* If budget not fully consumed, exit the polling mode */
 	if (work_done < budget) {
 		netif_rx_complete(netdev, napi);