Commit 33fdef24, authored by Claudiu Beznea, committed by David S. Miller

net: macb: free resources on failure path of at91ether_open()

DMA buffers were not freed on the failure path of at91ether_open().
Along with the changes for freeing the DMA buffers, the interrupt
enable/disable instructions were moved into the
at91ether_start()/at91ether_stop() functions, and the operations in
at91ether_stop() are now done in the reverse order of at91ether_start()
(a short illustrative sketch of the resulting pairing follows the lists
below). Before this patch the operation order on the interface open
path was as follows:
1/ alloc DMA buffers
2/ enable tx, rx
3/ enable interrupts
and the order on the interface close path was as follows:
1/ disable tx, rx
2/ disable interrupts
3/ free DMA buffers.
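Illustration only, not the driver code: the minimal user-space C sketch below mimics the start/stop pairing the patch establishes, with plain calloc()/free() standing in for the DMA buffers and a flag standing in for the PHY-connect step. All names here (fake_dev, fake_start, fake_stop, fake_open) are hypothetical and exist only for this example; the real change is in the diff below.

```c
/*
 * Acquire resources in order on "start", undo them in reverse order on
 * "stop", and unwind via labels when a later step of "open" fails --
 * the shape the patch gives at91ether_start()/at91ether_stop()/at91ether_open().
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_dev {
        void *rx_ring;          /* stands in for the DMA descriptor ring */
        void *rx_buffers;       /* stands in for the DMA receive buffers */
};

static int fake_start(struct fake_dev *d)
{
        d->rx_ring = calloc(1, 128);
        if (!d->rx_ring)
                return -ENOMEM;

        d->rx_buffers = calloc(1, 4096);
        if (!d->rx_buffers) {
                free(d->rx_ring);       /* undo the first allocation */
                d->rx_ring = NULL;
                return -ENOMEM;
        }

        /* ... enable rx/tx, then enable interrupts ... */
        return 0;
}

static void fake_stop(struct fake_dev *d)
{
        /* Reverse of fake_start(): interrupts off, rx/tx off, then free. */
        free(d->rx_buffers);
        d->rx_buffers = NULL;
        free(d->rx_ring);
        d->rx_ring = NULL;
}

static int fake_open(struct fake_dev *d, int phy_connect_fails)
{
        int ret = fake_start(d);

        if (ret)
                goto out;               /* nothing was acquired */

        if (phy_connect_fails) {        /* stands in for macb_phylink_connect() */
                ret = -ENODEV;
                goto stop;              /* undo everything fake_start() did */
        }
        return 0;

stop:
        fake_stop(d);
out:
        return ret;
}

int main(void)
{
        struct fake_dev d = { 0 };

        printf("open (phy fails): %d\n", fake_open(&d, 1)); /* buffers freed */
        printf("open (ok):        %d\n", fake_open(&d, 0));
        fake_stop(&d);                                       /* normal close path */
        return 0;
}
```

On the open path a failure after the start helper jumps to a label that calls the stop helper, so every acquired resource is released exactly once; the close path simply reuses the same stop helper.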

Fixes: 7897b071 ("net: macb: convert to phylink")
Signed-off-by: Claudiu Beznea <claudiu.beznea@microchip.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 0eaf228d
@@ -3762,15 +3762,9 @@ static int macb_init(struct platform_device *pdev)
 
 static struct sifive_fu540_macb_mgmt *mgmt;
 
-/* Initialize and start the Receiver and Transmit subsystems */
-static int at91ether_start(struct net_device *dev)
+static int at91ether_alloc_coherent(struct macb *lp)
 {
-        struct macb *lp = netdev_priv(dev);
         struct macb_queue *q = &lp->queues[0];
-        struct macb_dma_desc *desc;
-        dma_addr_t addr;
-        u32 ctl;
-        int i;
 
         q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
                                         (AT91ETHER_MAX_RX_DESCR *
@@ -3792,6 +3786,43 @@ static int at91ether_start(struct net_device *dev)
                 return -ENOMEM;
         }
 
+        return 0;
+}
+
+static void at91ether_free_coherent(struct macb *lp)
+{
+        struct macb_queue *q = &lp->queues[0];
+
+        if (q->rx_ring) {
+                dma_free_coherent(&lp->pdev->dev,
+                                  AT91ETHER_MAX_RX_DESCR *
+                                  macb_dma_desc_get_size(lp),
+                                  q->rx_ring, q->rx_ring_dma);
+                q->rx_ring = NULL;
+        }
+
+        if (q->rx_buffers) {
+                dma_free_coherent(&lp->pdev->dev,
+                                  AT91ETHER_MAX_RX_DESCR *
+                                  AT91ETHER_MAX_RBUFF_SZ,
+                                  q->rx_buffers, q->rx_buffers_dma);
+                q->rx_buffers = NULL;
+        }
+}
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct macb *lp)
+{
+        struct macb_queue *q = &lp->queues[0];
+        struct macb_dma_desc *desc;
+        dma_addr_t addr;
+        u32 ctl;
+        int i, ret;
+
+        ret = at91ether_alloc_coherent(lp);
+        if (ret)
+                return ret;
+
         addr = q->rx_buffers_dma;
         for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
                 desc = macb_rx_desc(q, i);
@@ -3813,9 +3844,39 @@ static int at91ether_start(struct net_device *dev)
         ctl = macb_readl(lp, NCR);
         macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
 
+        /* Enable MAC interrupts */
+        macb_writel(lp, IER, MACB_BIT(RCOMP)    |
+                             MACB_BIT(RXUBR)    |
+                             MACB_BIT(ISR_TUND) |
+                             MACB_BIT(ISR_RLE)  |
+                             MACB_BIT(TCOMP)    |
+                             MACB_BIT(ISR_ROVR) |
+                             MACB_BIT(HRESP));
+
         return 0;
 }
 
+static void at91ether_stop(struct macb *lp)
+{
+        u32 ctl;
+
+        /* Disable MAC interrupts */
+        macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
+                             MACB_BIT(RXUBR)    |
+                             MACB_BIT(ISR_TUND) |
+                             MACB_BIT(ISR_RLE)  |
+                             MACB_BIT(TCOMP)    |
+                             MACB_BIT(ISR_ROVR) |
+                             MACB_BIT(HRESP));
+
+        /* Disable Receiver and Transmitter */
+        ctl = macb_readl(lp, NCR);
+        macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+        /* Free resources. */
+        at91ether_free_coherent(lp);
+}
+
 /* Open the ethernet interface */
 static int at91ether_open(struct net_device *dev)
 {
@@ -3835,27 +3896,20 @@ static int at91ether_open(struct net_device *dev)
 
         macb_set_hwaddr(lp);
 
-        ret = at91ether_start(dev);
+        ret = at91ether_start(lp);
         if (ret)
                 goto pm_exit;
 
-        /* Enable MAC interrupts */
-        macb_writel(lp, IER, MACB_BIT(RCOMP)    |
-                             MACB_BIT(RXUBR)    |
-                             MACB_BIT(ISR_TUND) |
-                             MACB_BIT(ISR_RLE)  |
-                             MACB_BIT(TCOMP)    |
-                             MACB_BIT(ISR_ROVR) |
-                             MACB_BIT(HRESP));
-
         ret = macb_phylink_connect(lp);
         if (ret)
-                goto pm_exit;
+                goto stop;
 
         netif_start_queue(dev);
 
         return 0;
 
+stop:
+        at91ether_stop(lp);
 pm_exit:
         pm_runtime_put_sync(&lp->pdev->dev);
         return ret;
@@ -3865,37 +3919,13 @@ static int at91ether_open(struct net_device *dev)
 static int at91ether_close(struct net_device *dev)
 {
         struct macb *lp = netdev_priv(dev);
-        struct macb_queue *q = &lp->queues[0];
-        u32 ctl;
-
-        /* Disable Receiver and Transmitter */
-        ctl = macb_readl(lp, NCR);
-        macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
-
-        /* Disable MAC interrupts */
-        macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
-                             MACB_BIT(RXUBR)    |
-                             MACB_BIT(ISR_TUND) |
-                             MACB_BIT(ISR_RLE)  |
-                             MACB_BIT(TCOMP)    |
-                             MACB_BIT(ISR_ROVR) |
-                             MACB_BIT(HRESP));
-
         netif_stop_queue(dev);
 
         phylink_stop(lp->phylink);
         phylink_disconnect_phy(lp->phylink);
 
-        dma_free_coherent(&lp->pdev->dev,
-                          AT91ETHER_MAX_RX_DESCR *
-                          macb_dma_desc_get_size(lp),
-                          q->rx_ring, q->rx_ring_dma);
-        q->rx_ring = NULL;
-
-        dma_free_coherent(&lp->pdev->dev,
-                          AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
-                          q->rx_buffers, q->rx_buffers_dma);
-        q->rx_buffers = NULL;
+        at91ether_stop(lp);
 
         return pm_runtime_put(&lp->pdev->dev);
 }