Commit 48374ddc authored by Yevgeny Petrilin, committed by David S. Miller

mlx4_en: Removed TX locking when polling TX cq

There is no need to synchronize the polling with the transmit
function. The only place that needs synchronization is when we process
the cq from the transmit function. Also removed spin_lock_irq in favor
of spin_trylock: if somebody else is already processing the cq, there
is no need to wait for it to finish.
Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: b51968d6
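The change boils down to a simple trylock pattern around CQ processing: if another context already holds comp_lock, that context is already draining the CQ, so the caller can back off (or rearm its poll timer) instead of spinning with interrupts disabled. Below is a minimal sketch of that pattern; my_ring, my_cq, my_process_cq() and my_poll_cq() are hypothetical stand-ins used for illustration, not the driver's actual structures or functions.

#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical stand-ins for the driver's ring and CQ objects. */
struct my_ring {
	spinlock_t comp_lock;		/* serializes CQ processing only */
};

struct my_cq {
	struct timer_list timer;	/* periodic poll timer */
};

/* Hypothetical helper standing in for mlx4_en_process_tx_cq(). */
static void my_process_cq(struct my_ring *ring, struct my_cq *cq);

/*
 * Trylock pattern: if comp_lock is already held, somebody else is
 * processing the CQ right now, so do not wait for it to finish --
 * just reschedule the poll timer and return.
 */
static void my_poll_cq(struct my_ring *ring, struct my_cq *cq)
{
	if (!spin_trylock(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + 1);
		return;
	}
	my_process_cq(ring, cq);
	spin_unlock(&ring->comp_lock);
}

The same idea, with the timer rearm adjusted per call site, is what the hunks below apply to mlx4_en_tx_irq(), mlx4_en_poll_tx_cq() and mlx4_en_xmit_poll().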
@@ -404,14 +404,12 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
 	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
 	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
 
-	spin_lock_irq(&ring->comp_lock);
 	cq->armed = 0;
+	if (!spin_trylock(&ring->comp_lock))
+		return;
 	mlx4_en_process_tx_cq(cq->dev, cq);
-	if (ring->blocked)
-		mlx4_en_arm_cq(priv, cq);
-	else
-		mod_timer(&cq->timer, jiffies + 1);
-	spin_unlock_irq(&ring->comp_lock);
+	mod_timer(&cq->timer, jiffies + 1);
+	spin_unlock(&ring->comp_lock);
 }

@@ -424,8 +422,10 @@ void mlx4_en_poll_tx_cq(unsigned long data)
 
 	INC_PERF_COUNTER(priv->pstats.tx_poll);
 
-	netif_tx_lock(priv->dev);
-	spin_lock_irq(&ring->comp_lock);
+	if (!spin_trylock(&ring->comp_lock)) {
+		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+		return;
+	}
 	mlx4_en_process_tx_cq(cq->dev, cq);
 	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

@@ -435,8 +435,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
 	if (inflight && priv->port_up)
 		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
 
-	spin_unlock_irq(&ring->comp_lock);
-	netif_tx_unlock(priv->dev);
+	spin_unlock(&ring->comp_lock);
 }
 
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,

@@ -479,7 +478,10 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
 	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-		mlx4_en_process_tx_cq(priv->dev, cq);
+		if (spin_trylock(&ring->comp_lock)) {
+			mlx4_en_process_tx_cq(priv->dev, cq);
+			spin_unlock(&ring->comp_lock);
+		}
 }
 
 static void *get_frag_ptr(struct sk_buff *skb)