提交 e86cd53a 编写于 作者: Nicolas Ferre 提交者: David S. Miller

net/macb: better manage tx errors

Handle all TX errors, not only underruns. TX error management is
deferred to a dedicated workqueue.
Reinitialize the TX ring after treating all remaining frames, and
restart the controller when everything has been cleaned up properly.
Napi is not stopped during this task as the driver only handles
napi for RX for now.
With this sequence, we do not need a special check during the xmit
method as the packets will be caught by TX disable during workqueue
execution.
Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Tested-by: Joachim Eastwood <manabian@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 d1d1b53d
...@@ -44,6 +44,16 @@ ...@@ -44,6 +44,16 @@
#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
| MACB_BIT(ISR_ROVR)) | MACB_BIT(ISR_ROVR))
/* TX interrupt sources that indicate an error condition (underrun,
 * retry-limit exceeded, generic TX error); handled by the error task. */
#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
 | MACB_BIT(ISR_RLE) \
 | MACB_BIT(TXERR))
/* All TX interrupt sources: the error flags plus normal completion. */
#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
/*
 * Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT 1230
/* Ring buffer accessors */ /* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(unsigned int index) static unsigned int macb_tx_ring_wrap(unsigned int index)
...@@ -339,66 +349,113 @@ static void macb_update_stats(struct macb *bp) ...@@ -339,66 +349,113 @@ static void macb_update_stats(struct macb *bp)
*p += __raw_readl(reg); *p += __raw_readl(reg);
} }
static void macb_tx(struct macb *bp) static int macb_halt_tx(struct macb *bp)
{ {
unsigned int tail; unsigned long halt_time, timeout;
unsigned int head; u32 status;
u32 status;
status = macb_readl(bp, TSR); macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
macb_writel(bp, TSR, status);
netdev_vdbg(bp->dev, "macb_tx status = 0x%03lx\n", (unsigned long)status); timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
do {
halt_time = jiffies;
status = macb_readl(bp, TSR);
if (!(status & MACB_BIT(TGO)))
return 0;
if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) { usleep_range(10, 250);
int i; } while (time_before(halt_time, timeout));
netdev_err(bp->dev, "TX %s, resetting buffers\n",
status & MACB_BIT(UND) ?
"underrun" : "retry limit exceeded");
/* Transfer ongoing, disable transmitter, to avoid confusion */ return -ETIMEDOUT;
if (status & MACB_BIT(TGO)) }
macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));
head = bp->tx_head; static void macb_tx_error_task(struct work_struct *work)
{
struct macb *bp = container_of(work, struct macb, tx_error_task);
struct macb_tx_skb *tx_skb;
struct sk_buff *skb;
unsigned int tail;
/*Mark all the buffer as used to avoid sending a lost buffer*/ netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
for (i = 0; i < TX_RING_SIZE; i++) bp->tx_tail, bp->tx_head);
bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
/* Add wrap bit */ /* Make sure nobody is trying to queue up new packets */
bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); netif_stop_queue(bp->dev);
/* free transmit buffer in upper layer*/ /*
for (tail = bp->tx_tail; tail != head; tail++) { * Stop transmission now
struct macb_tx_skb *tx_skb; * (in case we have just queued new packets)
struct sk_buff *skb; */
if (macb_halt_tx(bp))
/* Just complain for now, reinitializing TX path can be good */
netdev_err(bp->dev, "BUG: halt tx timed out\n");
rmb(); /* No need for the lock here as nobody will interrupt us anymore */
tx_skb = macb_tx_skb(bp, tail); /*
skb = tx_skb->skb; * Treat frames in TX queue including the ones that caused the error.
* Free transmit buffers in upper layer.
*/
for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
struct macb_dma_desc *desc;
u32 ctrl;
dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, desc = macb_tx_desc(bp, tail);
skb->len, DMA_TO_DEVICE); ctrl = desc->ctrl;
tx_skb->skb = NULL; tx_skb = macb_tx_skb(bp, tail);
dev_kfree_skb_irq(skb); skb = tx_skb->skb;
}
bp->tx_head = bp->tx_tail = 0; if (ctrl & MACB_BIT(TX_USED)) {
netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
macb_tx_ring_wrap(tail), skb->data);
bp->stats.tx_packets++;
bp->stats.tx_bytes += skb->len;
} else {
/*
* "Buffers exhausted mid-frame" errors may only happen
* if the driver is buggy, so complain loudly about those.
* Statistics are updated by hardware.
*/
if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
netdev_err(bp->dev,
"BUG: TX buffers exhausted mid-frame\n");
/* Enable the transmitter again */ desc->ctrl = ctrl | MACB_BIT(TX_USED);
if (status & MACB_BIT(TGO)) }
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
DMA_TO_DEVICE);
tx_skb->skb = NULL;
dev_kfree_skb(skb);
} }
if (!(status & MACB_BIT(COMP))) /* Make descriptor updates visible to hardware */
/* wmb();
* This may happen when a buffer becomes complete
* between reading the ISR and scanning the /* Reinitialize the TX desc queue */
* descriptors. Nothing to worry about. macb_writel(bp, TBQP, bp->tx_ring_dma);
*/ /* Make TX ring reflect state of hardware */
return; bp->tx_head = bp->tx_tail = 0;
/* Now we are ready to start transmission again */
netif_wake_queue(bp->dev);
/* Housework before enabling TX IRQ */
macb_writel(bp, TSR, macb_readl(bp, TSR));
macb_writel(bp, IER, MACB_TX_INT_FLAGS);
}
static void macb_tx_interrupt(struct macb *bp)
{
unsigned int tail;
unsigned int head;
u32 status;
status = macb_readl(bp, TSR);
macb_writel(bp, TSR, status);
netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
(unsigned long)status);
head = bp->tx_head; head = bp->tx_head;
for (tail = bp->tx_tail; tail != head; tail++) { for (tail = bp->tx_tail; tail != head; tail++) {
...@@ -638,9 +695,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) ...@@ -638,9 +695,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
} }
} }
if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) | if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
MACB_BIT(ISR_RLE))) macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
macb_tx(bp); schedule_work(&bp->tx_error_task);
break;
}
if (status & MACB_BIT(TCOMP))
macb_tx_interrupt(bp);
/* /*
* Link change detection isn't possible with RMII, so we'll * Link change detection isn't possible with RMII, so we'll
...@@ -970,13 +1032,8 @@ static void macb_init_hw(struct macb *bp) ...@@ -970,13 +1032,8 @@ static void macb_init_hw(struct macb *bp)
macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
/* Enable interrupts */ /* Enable interrupts */
macb_writel(bp, IER, (MACB_BIT(RCOMP) macb_writel(bp, IER, (MACB_RX_INT_FLAGS
| MACB_BIT(RXUBR) | MACB_TX_INT_FLAGS
| MACB_BIT(ISR_TUND)
| MACB_BIT(ISR_RLE)
| MACB_BIT(TXERR)
| MACB_BIT(TCOMP)
| MACB_BIT(ISR_ROVR)
| MACB_BIT(HRESP))); | MACB_BIT(HRESP)));
} }
...@@ -1428,6 +1485,7 @@ static int __init macb_probe(struct platform_device *pdev) ...@@ -1428,6 +1485,7 @@ static int __init macb_probe(struct platform_device *pdev)
bp->dev = dev; bp->dev = dev;
spin_lock_init(&bp->lock); spin_lock_init(&bp->lock);
INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
bp->pclk = clk_get(&pdev->dev, "pclk"); bp->pclk = clk_get(&pdev->dev, "pclk");
if (IS_ERR(bp->pclk)) { if (IS_ERR(bp->pclk)) {
......
...@@ -538,6 +538,7 @@ struct macb { ...@@ -538,6 +538,7 @@ struct macb {
struct clk *hclk; struct clk *hclk;
struct net_device *dev; struct net_device *dev;
struct napi_struct napi; struct napi_struct napi;
struct work_struct tx_error_task;
struct net_device_stats stats; struct net_device_stats stats;
union { union {
struct macb_stats macb; struct macb_stats macb;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册