提交 28679751 编写于 作者: Eric Dumazet 提交者: David S. Miller

net: don't update dev->trans_start in 10GB drivers

Followup of commits 9d21493b
and 08baf561
(net: tx scalability works : trans_start)
(net: txq_trans_update() helper)

Now that core network takes care of trans_start updates, don't do it
in drivers themselves, if possible. Multi-queue drivers can
avoid one cache miss (on dev->trans_start) in their start_xmit()
handler.

Exceptions are NETIF_F_LLTX drivers (vxge & tehuti)
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 3f1f39c4
...@@ -478,8 +478,6 @@ static int be_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -478,8 +478,6 @@ static int be_xmit(struct sk_buff *skb, struct net_device *netdev)
be_txq_notify(&adapter->ctrl, txq->id, wrb_cnt); be_txq_notify(&adapter->ctrl, txq->id, wrb_cnt);
netdev->trans_start = jiffies;
be_tx_stats_update(adapter, wrb_cnt, copied, stopped); be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
......
...@@ -10617,7 +10617,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -10617,7 +10617,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mmiowb(); mmiowb();
fp->tx_bd_prod += nbd; fp->tx_bd_prod += nbd;
dev->trans_start = jiffies;
if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
/* We want bnx2x_tx_int to "see" the updated tx_bd_prod /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
......
...@@ -1879,7 +1879,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1879,7 +1879,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpl->vlan_valid = 0; cpl->vlan_valid = 0;
send: send:
dev->trans_start = jiffies;
ret = t1_sge_tx(skb, adapter, 0, dev); ret = t1_sge_tx(skb, adapter, 0, dev);
/* If transmit busy, and we reallocated skb's due to headroom limit, /* If transmit busy, and we reallocated skb's due to headroom limit,
......
...@@ -1286,7 +1286,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1286,7 +1286,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (vlan_tx_tag_present(skb) && pi->vlan_grp) if (vlan_tx_tag_present(skb) && pi->vlan_grp)
qs->port_stats[SGE_PSTAT_VLANINS]++; qs->port_stats[SGE_PSTAT_VLANINS]++;
dev->trans_start = jiffies;
spin_unlock(&q->lock); spin_unlock(&q->lock);
/* /*
......
...@@ -661,8 +661,6 @@ static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -661,8 +661,6 @@ static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1) if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1)
netif_stop_queue(netdev); netif_stop_queue(netdev);
netdev->trans_start = jiffies;
spin_unlock_irqrestore(&enic->wq_lock[0], flags); spin_unlock_irqrestore(&enic->wq_lock[0], flags);
return NETDEV_TX_OK; return NETDEV_TX_OK;
......
...@@ -1488,7 +1488,6 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -1488,7 +1488,6 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (count) { if (count) {
ixgb_tx_queue(adapter, count, vlan_id, tx_flags); ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
netdev->trans_start = jiffies;
/* Make sure there is space in the ring for the next send. */ /* Make sure there is space in the ring for the next send. */
ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED); ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
......
...@@ -4863,7 +4863,6 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -4863,7 +4863,6 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (count) { if (count) {
ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
hdr_len); hdr_len);
netdev->trans_start = jiffies;
ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
} else { } else {
......
...@@ -819,7 +819,6 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -819,7 +819,6 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Ring doorbell! */ /* Ring doorbell! */
wmb(); wmb();
writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL); writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
dev->trans_start = jiffies;
/* Poll CQ here */ /* Poll CQ here */
mlx4_en_xmit_poll(priv, tx_ind); mlx4_en_xmit_poll(priv, tx_ind);
......
...@@ -2892,7 +2892,6 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -2892,7 +2892,6 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
tx->stop_queue++; tx->stop_queue++;
netif_tx_stop_queue(netdev_queue); netif_tx_stop_queue(netdev_queue);
} }
dev->trans_start = jiffies;
return 0; return 0;
abort_linearize: abort_linearize:
......
...@@ -1496,7 +1496,6 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -1496,7 +1496,6 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
netxen_nic_update_cmd_producer(adapter, tx_ring, producer); netxen_nic_update_cmd_producer(adapter, tx_ring, producer);
adapter->stats.xmitcalled++; adapter->stats.xmitcalled++;
netdev->trans_start = jiffies;
return NETDEV_TX_OK; return NETDEV_TX_OK;
......
...@@ -2108,7 +2108,6 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev) ...@@ -2108,7 +2108,6 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
wmb(); wmb();
ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
ndev->trans_start = jiffies;
QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n", QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
tx_ring->prod_idx, skb->len); tx_ring->prod_idx, skb->len);
......
...@@ -4299,7 +4299,6 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -4299,7 +4299,6 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
s2io_stop_tx_queue(sp, fifo->fifo_no); s2io_stop_tx_queue(sp, fifo->fifo_no);
} }
mac_control->stats_info->sw_stat.mem_allocated += skb->truesize; mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
dev->trans_start = jiffies;
spin_unlock_irqrestore(&fifo->tx_lock, flags); spin_unlock_irqrestore(&fifo->tx_lock, flags);
if (sp->config.intr_type == MSI_X) if (sp->config.intr_type == MSI_X)
......
...@@ -438,6 +438,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue) ...@@ -438,6 +438,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
kfree_skb(skb); kfree_skb(skb);
return -EPIPE; return -EPIPE;
} }
efx->net_dev->trans_start = jiffies;
} }
return 0; return 0;
......
...@@ -360,13 +360,6 @@ inline int efx_xmit(struct efx_nic *efx, ...@@ -360,13 +360,6 @@ inline int efx_xmit(struct efx_nic *efx,
/* Map fragments for DMA and add to TX queue */ /* Map fragments for DMA and add to TX queue */
rc = efx_enqueue_skb(tx_queue, skb); rc = efx_enqueue_skb(tx_queue, skb);
if (unlikely(rc != NETDEV_TX_OK))
goto out;
/* Update last TX timer */
efx->net_dev->trans_start = jiffies;
out:
return rc; return rc;
} }
......
...@@ -1718,8 +1718,9 @@ static int bdx_tx_transmit(struct sk_buff *skb, struct net_device *ndev) ...@@ -1718,8 +1718,9 @@ static int bdx_tx_transmit(struct sk_buff *skb, struct net_device *ndev)
WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
#endif #endif
ndev->trans_start = jiffies; #ifdef BDX_LLTX
ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
#endif
priv->net_stats.tx_packets++; priv->net_stats.tx_packets++;
priv->net_stats.tx_bytes += skb->len; priv->net_stats.tx_bytes += skb->len;
......
...@@ -677,7 +677,7 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr, ...@@ -677,7 +677,7 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
return VXGE_HW_OK; return VXGE_HW_OK;
} }
/* select a vpath to trasmit the packet */ /* select a vpath to transmit the packet */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb, static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
int *do_lock) int *do_lock)
{ {
...@@ -992,7 +992,9 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -992,7 +992,9 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN); VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
vxge_hw_fifo_txdl_post(fifo_hw, dtr); vxge_hw_fifo_txdl_post(fifo_hw, dtr);
dev->trans_start = jiffies; #ifdef NETIF_F_LLTX
dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
#endif
spin_unlock_irqrestore(&fifo->tx_lock, flags); spin_unlock_irqrestore(&fifo->tx_lock, flags);
VXGE_COMPLETE_VPATH_TX(fifo); VXGE_COMPLETE_VPATH_TX(fifo);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册