Commit 8c8963b2 authored by David S. Miller

Merge branch 'axienet-fixes'

Robert Hancock says:

====================
Xilinx axienet fixes

Various fixes for the Xilinx AXI Ethernet driver.

Changed since v2:
-added Reviewed-by tags, added some explanation to commit
messages, no code changes

Changed since v1:
-corrected a Fixes tag to point to mainline commit
-split up reset changes into 3 patches
-added ratelimit on netdev_warn in TX busy case
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
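
For context: the reset patches in this series replace an open-coded udelay() poll loop with the read_poll_timeout() helper from <linux/iopoll.h>, as seen in the first hunks below. A minimal sketch of that helper's calling convention follows; my_read_reg(), MY_CTRL_REG and MY_RESET_MASK are hypothetical stand-ins, not driver code:

/* Sketch of the read_poll_timeout() pattern the series adopts.
 * struct my_dev, my_read_reg(), MY_CTRL_REG and MY_RESET_MASK are
 * hypothetical stand-ins, not axienet code.
 */
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define MY_CTRL_REG	0x00
#define MY_RESET_MASK	BIT(0)

struct my_dev {
	void __iomem *regs;
};

static u32 my_read_reg(struct my_dev *dev, off_t reg)
{
	return ioread32(dev->regs + reg);
}

static int my_wait_reset_clear(struct my_dev *dev)
{
	u32 value;

	/* Polls the register (sleeping between reads) until the reset
	 * bit clears; returns 0 on success, -ETIMEDOUT after 50 ms.
	 */
	return read_poll_timeout(my_read_reg, value,
				 !(value & MY_RESET_MASK),
				 10, 50000, false, dev, MY_CTRL_REG);
}
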
@@ -41,8 +41,9 @@
 #include "xilinx_axienet.h"
 
 /* Descriptors defines for Tx and Rx DMA */
-#define TX_BD_NUM_DEFAULT		64
+#define TX_BD_NUM_DEFAULT		128
 #define RX_BD_NUM_DEFAULT		1024
+#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
 #define TX_BD_NUM_MAX			4096
 #define RX_BD_NUM_MAX			4096
@@ -496,7 +497,8 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
 
 static int __axienet_device_reset(struct axienet_local *lp)
 {
-	u32 timeout;
+	u32 value;
+	int ret;
 
 	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 	 * process of Axi DMA takes a while to complete as all pending
@@ -506,15 +508,23 @@ static int __axienet_device_reset(struct axienet_local *lp)
 	 * they both reset the entire DMA core, so only one needs to be used.
 	 */
 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
-	timeout = DELAY_OF_ONE_MILLISEC;
-	while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
-				XAXIDMA_CR_RESET_MASK) {
-		udelay(1);
-		if (--timeout == 0) {
-			netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
-				   __func__);
-			return -ETIMEDOUT;
-		}
+	ret = read_poll_timeout(axienet_dma_in32, value,
+				!(value & XAXIDMA_CR_RESET_MASK),
+				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+				XAXIDMA_TX_CR_OFFSET);
+	if (ret) {
+		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
+		return ret;
+	}
+
+	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
+	ret = read_poll_timeout(axienet_ior, value,
+				value & XAE_INT_PHYRSTCMPLT_MASK,
+				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+				XAE_IS_OFFSET);
+	if (ret) {
+		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
+		return ret;
 	}
 
 	return 0;
@@ -623,6 +633,8 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 		if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
 			break;
 
+		/* Ensure we see complete descriptor update */
+		dma_rmb();
 		phys = desc_get_phys_addr(lp, cur_p);
 		dma_unmap_single(ndev->dev.parent, phys,
 				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
@@ -631,13 +643,15 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
 			dev_consume_skb_irq(cur_p->skb);
 
-		cur_p->cntrl = 0;
 		cur_p->app0 = 0;
 		cur_p->app1 = 0;
 		cur_p->app2 = 0;
 		cur_p->app4 = 0;
-		cur_p->status = 0;
 		cur_p->skb = NULL;
+		/* ensure our transmit path and device don't prematurely see status cleared */
+		wmb();
+		cur_p->cntrl = 0;
+		cur_p->status = 0;
 
 		if (sizep)
 			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
@@ -646,6 +660,32 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 	return i;
 }
 
+/**
+ * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
+ * @lp:		Pointer to the axienet_local structure
+ * @num_frag:	The number of BDs to check for
+ *
+ * Return: 0, on success
+ *	    NETDEV_TX_BUSY, if any of the descriptors are not free
+ *
+ * This function is invoked before BDs are allocated and transmission starts.
+ * This function returns 0 if a BD or group of BDs can be allocated for
+ * transmission. If the BD or any of the BDs are not free the function
+ * returns a busy status. This is invoked from axienet_start_xmit.
+ */
+static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
+					    int num_frag)
+{
+	struct axidma_bd *cur_p;
+
+	/* Ensure we see all descriptor updates from device or TX IRQ path */
+	rmb();
+	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
+	if (cur_p->cntrl)
+		return NETDEV_TX_BUSY;
+	return 0;
+}
+
 /**
  * axienet_start_xmit_done - Invoked once a transmit is completed by the
  * Axi DMA Tx channel.
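
For context on the barrier hunks above: the wmb() added in axienet_free_tx_chain() pairs with the rmb() in the relocated axienet_check_tx_bd_space(), so a reader that sees cntrl == 0 is guaranteed to also see the rest of the slot already scrubbed. A generic sketch of that pairing follows; struct desc, free_slot() and slot_is_free() are hypothetical names, not driver code:

/* Generic ordering sketch for recycling a DMA descriptor slot.
 * Mandatory wmb()/rmb() are used (rather than smp_*) because the DMA
 * engine, not just another CPU, also observes the descriptor.
 */
#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct desc {
	u32 cntrl;	/* nonzero while the slot is in use */
	u32 status;
};

/* Completion path: scrub the slot, then publish it as free. */
static void free_slot(struct desc *d)
{
	d->status = 0;
	/* Make the scrubbed fields visible before cntrl marks it free */
	wmb();
	WRITE_ONCE(d->cntrl, 0);
}

/* Transmit path: check the "free" flag first, then trust the contents. */
static bool slot_is_free(struct desc *d)
{
	if (READ_ONCE(d->cntrl))
		return false;
	/* Pairs with the wmb() in free_slot() */
	rmb();
	return true;
}
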
@@ -675,30 +715,8 @@ static void axienet_start_xmit_done(struct net_device *ndev)
 
 	/* Matches barrier in axienet_start_xmit */
 	smp_mb();
-	netif_wake_queue(ndev);
-}
-
-/**
- * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
- * @lp:		Pointer to the axienet_local structure
- * @num_frag:	The number of BDs to check for
- *
- * Return: 0, on success
- *	    NETDEV_TX_BUSY, if any of the descriptors are not free
- *
- * This function is invoked before BDs are allocated and transmission starts.
- * This function returns 0 if a BD or group of BDs can be allocated for
- * transmission. If the BD or any of the BDs are not free the function
- * returns a busy status. This is invoked from axienet_start_xmit.
- */
-static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
-					    int num_frag)
-{
-	struct axidma_bd *cur_p;
-
-	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
-	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
-		return NETDEV_TX_BUSY;
-	return 0;
+	if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+		netif_wake_queue(ndev);
 }
 
 /**
@@ -730,20 +748,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	num_frag = skb_shinfo(skb)->nr_frags;
 	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 
-	if (axienet_check_tx_bd_space(lp, num_frag)) {
-		if (netif_queue_stopped(ndev))
-			return NETDEV_TX_BUSY;
-
+	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
+		/* Should not happen as last start_xmit call should have
+		 * checked for sufficient space and queue should only be
+		 * woken when sufficient space is available.
+		 */
 		netif_stop_queue(ndev);
-
-		/* Matches barrier in axienet_start_xmit_done */
-		smp_mb();
-
-		/* Space might have just been freed - check again */
-		if (axienet_check_tx_bd_space(lp, num_frag))
-			return NETDEV_TX_BUSY;
-
-		netif_wake_queue(ndev);
+		if (net_ratelimit())
+			netdev_warn(ndev, "TX ring unexpectedly full\n");
+		return NETDEV_TX_BUSY;
 	}
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -804,6 +817,18 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	if (++lp->tx_bd_tail >= lp->tx_bd_num)
 		lp->tx_bd_tail = 0;
 
+	/* Stop queue if next transmit may not have space */
+	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+		netif_stop_queue(ndev);
+
+		/* Matches barrier in axienet_start_xmit_done */
+		smp_mb();
+
+		/* Space might have just been freed - check again */
+		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+			netif_wake_queue(ndev);
+	}
+
 	return NETDEV_TX_OK;
 }
@@ -834,6 +859,8 @@ static void axienet_recv(struct net_device *ndev)
 
 		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
 
+		/* Ensure we see complete descriptor update */
+		dma_rmb();
 		phys = desc_get_phys_addr(lp, cur_p);
 		dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
 				 DMA_FROM_DEVICE);
@@ -1352,7 +1379,8 @@ axienet_ethtools_set_ringparam(struct net_device *ndev,
 	if (ering->rx_pending > RX_BD_NUM_MAX ||
 	    ering->rx_mini_pending ||
 	    ering->rx_jumbo_pending ||
-	    ering->rx_pending > TX_BD_NUM_MAX)
+	    ering->tx_pending < TX_BD_NUM_MIN ||
+	    ering->tx_pending > TX_BD_NUM_MAX)
 		return -EINVAL;
 
 	if (netif_running(ndev))
@@ -2027,6 +2055,11 @@ static int axienet_probe(struct platform_device *pdev)
 	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
 	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
 
+	/* Reset core now that clocks are enabled, prior to accessing MDIO */
+	ret = __axienet_device_reset(lp);
+	if (ret)
+		goto cleanup_clk;
+
 	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
 	if (lp->phy_node) {
 		ret = axienet_mdio_setup(lp);
...
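
For context on the TX queue handling above: stopping the queue, issuing a full barrier, then re-checking for space is the standard lockless stop/wake idiom for single-queue network drivers, with the smp_mb() calls on the two sides pairing with each other. A condensed sketch follows; tx_slots_available() is a hypothetical stand-in for the driver's !axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1) check:

/* Condensed sketch of the stop/wake idiom, not driver code. */
#include <linux/netdevice.h>

static bool tx_slots_available(struct net_device *ndev)
{
	/* Hypothetical stub: a real driver would compare ring head/tail */
	return true;
}

/* ndo_start_xmit side, after queuing a packet: */
static void tx_maybe_stop(struct net_device *ndev)
{
	if (!tx_slots_available(ndev)) {
		netif_stop_queue(ndev);
		/* Pairs with smp_mb() in tx_done(): order our stop
		 * against re-reading the ring state.
		 */
		smp_mb();
		/* Completion may have freed slots while we stopped */
		if (tx_slots_available(ndev))
			netif_wake_queue(ndev);
	}
}

/* TX completion side, after reclaiming descriptors: */
static void tx_done(struct net_device *ndev)
{
	/* Pairs with smp_mb() in tx_maybe_stop() */
	smp_mb();
	if (netif_queue_stopped(ndev) && tx_slots_available(ndev))
		netif_wake_queue(ndev);
}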