Commit 44c8bc3c authored by Florian Fainelli, committed by David S. Miller

net: bcmgenet: log RX buffer allocation and RX/TX dma failures

To help troubleshoot heavy memory pressure conditions, add a set of
statistics counters to log RX buffer allocation and RX/TX DMA mapping
failures. These are reported like any other counters through the ethtool
stats interface.
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 60b4ea17
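With the patch applied, the three new counters are reported through the ethtool stats interface alongside the existing MIB counters. A hypothetical query (the interface name and values below are illustrative only, not taken from the commit):

# ethtool -S eth0 | grep -E 'alloc_rx_buff_failed|rx_dma_failed|tx_dma_failed'
     alloc_rx_buff_failed: 12
     rx_dma_failed: 3
     tx_dma_failed: 0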
@@ -613,6 +613,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
 			UMAC_RBUF_OVFL_CNT),
 	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
 	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
+	STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+	STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
+	STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
 };
 
 #define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -989,6 +992,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
 	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
 	ret = dma_mapping_error(kdev, mapping);
 	if (ret) {
+		priv->mib.tx_dma_failed++;
 		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
 		dev_kfree_skb(skb);
 		return ret;
@@ -1035,6 +1039,7 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
 		skb_frag_size(frag), DMA_TO_DEVICE);
 	ret = dma_mapping_error(kdev, mapping);
 	if (ret) {
+		priv->mib.tx_dma_failed++;
 		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
 			  __func__);
 		return ret;
@@ -1231,6 +1236,7 @@ static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
 				 priv->rx_buf_len, DMA_FROM_DEVICE);
 	ret = dma_mapping_error(kdev, mapping);
 	if (ret) {
+		priv->mib.rx_dma_failed++;
 		bcmgenet_free_cb(cb);
 		netif_err(priv, rx_err, priv->dev,
 			  "%s DMA map failed\n", __func__);
@@ -1397,8 +1403,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 		/* refill RX path on the current control block */
 refill:
 		err = bcmgenet_rx_refill(priv, cb);
-		if (err)
+		if (err) {
+			priv->mib.alloc_rx_buff_failed++;
 			netif_err(priv, rx_err, dev, "Rx refill failed\n");
+		}
 
 		rxpktprocessed++;
 		priv->rx_read_ptr++;
...
@@ -143,6 +143,9 @@ struct bcmgenet_mib_counters {
 	u32	rbuf_ovflow_cnt;
 	u32	rbuf_err_cnt;
 	u32	mdf_err_cnt;
+	u32	alloc_rx_buff_failed;
+	u32	rx_dma_failed;
+	u32	tx_dma_failed;
 };
 
 #define UMAC_HD_BKP_CTRL		0x004
...
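For context, the plumbing that makes these counters visible follows the kernel's standard ethtool pattern: the driver bumps a private counter on the failure path, reports its name via ethtool_ops::get_strings(), and copies its value out via ethtool_ops::get_ethtool_stats(). Below is a minimal, self-contained sketch of that generic pattern; all demo_* identifiers are hypothetical, and this is not the bcmgenet implementation, which routes everything through its bcmgenet_gstrings_stats descriptor table and the STAT_GENET_* macros extended above.

/* Generic sketch: expose one driver-private failure counter to ethtool -S.
 * All demo_* names are hypothetical; only the ethtool hooks are real.
 */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/string.h>

struct demo_priv {
	u64 tx_dma_failed;	/* bumped where a TX DMA mapping fails */
};

static const char demo_stat_strings[][ETH_GSTRING_LEN] = {
	"tx_dma_failed",
};

static int demo_get_sset_count(struct net_device *dev, int sset)
{
	/* Tell ethtool how many stat names/values to expect */
	return sset == ETH_SS_STATS ? ARRAY_SIZE(demo_stat_strings)
				    : -EOPNOTSUPP;
}

static void demo_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	/* Names printed on the left-hand side of ethtool -S output */
	if (stringset == ETH_SS_STATS)
		memcpy(data, demo_stat_strings, sizeof(demo_stat_strings));
}

static void demo_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct demo_priv *priv = netdev_priv(dev);

	/* Values are copied out in the same order as demo_stat_strings */
	data[0] = priv->tx_dma_failed;
}

static const struct ethtool_ops demo_ethtool_ops = {
	.get_sset_count		= demo_get_sset_count,
	.get_strings		= demo_get_strings,
	.get_ethtool_stats	= demo_get_ethtool_stats,
};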