Commit 9d08da96 authored by David S. Miller

Merge branch 'sh_eth'

Ben Hutchings says:

====================
Fixes for sh_eth #2

I'm continuing review and testing of Ethernet support on the R-Car H2
chip.  This series fixes more of the issues I've found, but it won't be
the last set.

These are not tested on any of the other supported chips.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1316,8 +1316,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
 		     RFLR);
 
 	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
-	if (start)
+	if (start) {
+		mdp->irq_enabled = true;
 		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+	}
 
 	/* PAUSE Prohibition */
 	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1653,7 +1655,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
 		ret = IRQ_HANDLED;
 	else
-		goto other_irq;
+		goto out;
+
+	if (!likely(mdp->irq_enabled)) {
+		sh_eth_write(ndev, 0, EESIPR);
+		goto out;
+	}
 
 	if (intr_status & EESR_RX_CHECK) {
 		if (napi_schedule_prep(&mdp->napi)) {
@@ -1684,7 +1691,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 		sh_eth_error(ndev, intr_status);
 	}
 
-other_irq:
+out:
 	spin_unlock(&mdp->lock);
 
 	return ret;
@@ -1712,7 +1719,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
 	napi_complete(napi);
 
 	/* Reenable Rx interrupts */
-	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+	if (mdp->irq_enabled)
+		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 out:
 	return budget - quota;
 }
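
Read together, the four hunks above form one handshake around the new irq_enabled flag: sh_eth_dev_init() sets it before unmasking interrupts, the interrupt handler re-masks the hardware and bails out if the flag has already been cleared, and the NAPI poll routine re-enables interrupts only while the flag is still set. Below is a condensed sketch of that pattern; struct dev_priv, dev_write_irq_mask() and all_events are hypothetical stand-ins for the driver's real names, so treat this as an illustration rather than the actual code.

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct dev_priv {
	struct napi_struct napi;
	spinlock_t lock;
	bool irq_enabled;	/* may the IRQ mask be (re)opened? */
	u32 all_events;		/* full interrupt-enable mask */
};

/* Hypothetical register write, standing in for
 * sh_eth_write(ndev, mask, EESIPR). */
void dev_write_irq_mask(struct dev_priv *dp, u32 mask);

static irqreturn_t dev_interrupt(int irq, void *data)
{
	struct dev_priv *dp = data;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&dp->lock);
	if (!dp->irq_enabled) {
		/* Shutdown has begun: mask everything and bail out
		 * rather than scheduling NAPI against dying rings. */
		dev_write_irq_mask(dp, 0);
		goto out;
	}
	ret = IRQ_HANDLED;
	/* ... ack events, schedule NAPI, handle errors ... */
out:
	spin_unlock(&dp->lock);
	return ret;
}

static int dev_poll(struct napi_struct *napi, int budget)
{
	struct dev_priv *dp = container_of(napi, struct dev_priv, napi);
	int done = 0;

	/* ... receive up to @budget packets, counting them in done ... */
	napi_complete(napi);

	/* Re-open the mask only if no shutdown started while the
	 * IRQ handler had it closed. */
	if (dp->irq_enabled)
		dev_write_irq_mask(dp, dp->all_events);
	return done;
}

The hardware write in the handler matters because an interrupt can still arrive after the flag is cleared; re-masking from the handler silences the controller instead of servicing events against rings that are being torn down.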
@@ -1968,40 +1976,52 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
 		return -EINVAL;
 
 	if (netif_running(ndev)) {
+		netif_device_detach(ndev);
 		netif_tx_disable(ndev);
-		/* Disable interrupts by clearing the interrupt mask. */
+
+		/* Serialise with the interrupt handler and NAPI, then
+		 * disable interrupts. We have to clear the
+		 * irq_enabled flag first to ensure that interrupts
+		 * won't be re-enabled.
+		 */
+		mdp->irq_enabled = false;
+		synchronize_irq(ndev->irq);
+		napi_synchronize(&mdp->napi);
 		sh_eth_write(ndev, 0x0000, EESIPR);
+
 		/* Stop the chip's Tx and Rx processes. */
 		sh_eth_write(ndev, 0, EDTRR);
 		sh_eth_write(ndev, 0, EDRRR);
-		synchronize_irq(ndev->irq);
-	}
 
-	/* Free all the skbuffs in the Rx queue. */
-	sh_eth_ring_free(ndev);
-	/* Free DMA buffer */
-	sh_eth_free_dma_buffer(mdp);
+		/* Free all the skbuffs in the Rx queue. */
+		sh_eth_ring_free(ndev);
+		/* Free DMA buffer */
+		sh_eth_free_dma_buffer(mdp);
+	}
 
 	/* Set new parameters */
 	mdp->num_rx_ring = ring->rx_pending;
 	mdp->num_tx_ring = ring->tx_pending;
 
-	ret = sh_eth_ring_init(ndev);
-	if (ret < 0) {
-		netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
-		return ret;
-	}
-	ret = sh_eth_dev_init(ndev, false);
-	if (ret < 0) {
-		netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
-		return ret;
-	}
-
 	if (netif_running(ndev)) {
+		ret = sh_eth_ring_init(ndev);
+		if (ret < 0) {
+			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
+				   __func__);
+			return ret;
+		}
+		ret = sh_eth_dev_init(ndev, false);
+		if (ret < 0) {
+			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
+				   __func__);
+			return ret;
+		}
+
+		mdp->irq_enabled = true;
 		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 		/* Setting the Rx mode will start the Rx process. */
 		sh_eth_write(ndev, EDRRR_R, EDRRR);
-		netif_wake_queue(ndev);
+		netif_device_attach(ndev);
 	}
 
 	return 0;
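
Two things change shape here besides the irq_enabled handshake: the teardown and rebuild of the DMA rings now happen only when the interface is running (previously a stopped interface would still have its rings freed and re-initialised), and netif_tx_disable()/netif_wake_queue() become netif_device_detach()/netif_device_attach(), which also mark the device as absent so the TX watchdog cannot fire while the rings are gone. A condensed sketch of the fixed control flow, with hypothetical helpers standing in for the inlined blocks and the dev_priv sketch from above extended with ring counts:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical helpers; each stands in for a block of the real code. */
void dev_quiesce_irqs_and_napi(struct dev_priv *dp); /* flag, sync, mask */
void dev_stop_dma(struct dev_priv *dp);              /* stop Tx/Rx DMA   */
void dev_free_rings(struct dev_priv *dp);
int dev_alloc_rings(struct dev_priv *dp);
void dev_start_dma(struct dev_priv *dp);

static int dev_set_ringparam(struct net_device *ndev,
			     struct ethtool_ringparam *ring)
{
	struct dev_priv *dp = netdev_priv(ndev);
	int ret;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);	/* TX watchdog off */
		dev_quiesce_irqs_and_napi(dp);
		dev_stop_dma(dp);
		dev_free_rings(dp);	/* safe: nothing is in flight */
	}

	/* New sizes take effect on the next (re)build. */
	dp->num_rx_ring = ring->rx_pending;
	dp->num_tx_ring = ring->tx_pending;

	if (netif_running(ndev)) {	/* rebuild only if it was up */
		ret = dev_alloc_rings(dp);
		if (ret < 0)
			return ret;
		dp->irq_enabled = true;
		dev_start_dma(dp);
		netif_device_attach(ndev);
	}
	return 0;
}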
@@ -2117,6 +2137,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 	spin_unlock_irqrestore(&mdp->lock, flags);
 
+	if (skb_padto(skb, ETH_ZLEN))
+		return NETDEV_TX_OK;
+
 	entry = mdp->cur_tx % mdp->num_tx_ring;
 	mdp->tx_skbuff[entry] = skb;
 	txdesc = &mdp->tx_ring[entry];
@@ -2126,10 +2149,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		 skb->len + 2);
 	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
 				      DMA_TO_DEVICE);
-	if (skb->len < ETH_ZLEN)
-		txdesc->buffer_length = ETH_ZLEN;
-	else
-		txdesc->buffer_length = skb->len;
+	txdesc->buffer_length = skb->len;
 
 	if (entry >= mdp->num_tx_ring - 1)
 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
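
These two hunks fix short-frame padding on transmit. The old code mapped only skb->len bytes for DMA but told the descriptor to send ETH_ZLEN bytes, so for sub-minimum frames the MAC transmitted whatever sat in memory past the end of the skb data. skb_padto() instead grows the skb and zero-fills it to the minimum length before the DMA mapping, and it frees the skb itself on allocation failure. A minimal sketch of the corrected order (dev_start_xmit is a hypothetical name):

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t dev_start_xmit(struct sk_buff *skb,
				  struct net_device *ndev)
{
	dma_addr_t addr;

	/* Grow and zero-fill the skb to ETH_ZLEN (60) bytes if needed.
	 * On allocation failure the skb is already freed, so consuming
	 * the packet with NETDEV_TX_OK is the correct response. */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* skb->len now covers the padding, so mapping skb->len bytes
	 * and putting skb->len in the descriptor is sufficient. */
	addr = dma_map_single(&ndev->dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	/* ... fill the TX descriptor and kick the DMA engine ... */
	return NETDEV_TX_OK;
}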
@@ -2181,7 +2201,13 @@ static int sh_eth_close(struct net_device *ndev)
 	netif_stop_queue(ndev);
 
-	/* Disable interrupts by clearing the interrupt mask. */
+	/* Serialise with the interrupt handler and NAPI, then disable
+	 * interrupts. We have to clear the irq_enabled flag first to
+	 * ensure that interrupts won't be re-enabled.
+	 */
+	mdp->irq_enabled = false;
+	synchronize_irq(ndev->irq);
+	napi_disable(&mdp->napi);
 	sh_eth_write(ndev, 0x0000, EESIPR);
 
 	/* Stop the chip's Tx and Rx processes. */
 	sh_eth_write(ndev, 0, EDTRR);
@@ -2198,8 +2224,6 @@ static int sh_eth_close(struct net_device *ndev)
 	free_irq(ndev->irq, ndev);
 
-	napi_disable(&mdp->napi);
-
 	/* Free all the skbuffs in the Rx queue. */
 	sh_eth_ring_free(ndev);
 
 	/* free DMA buffer */
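
The close path gets the same ordering discipline as sh_eth_set_ringparam(): clear irq_enabled first so neither the interrupt handler nor NAPI can re-open the mask, wait out a running handler with synchronize_irq(), disable NAPI, and only then mask and stop the hardware. napi_disable() accordingly moves up from after free_irq(), so the poll routine has finished before the controller is stopped. Sketched with the hypothetical names used in the earlier blocks:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Each step closes one path by which the interrupt mask could be
 * re-opened behind our back. */
static void dev_quiesce(struct net_device *ndev)
{
	struct dev_priv *dp = netdev_priv(ndev);

	dp->irq_enabled = false;	/* 1: forbid re-enabling       */
	synchronize_irq(ndev->irq);	/* 2: wait out the IRQ handler */
	napi_disable(&dp->napi);	/* 3: wait out the NAPI poll   */
	dev_write_irq_mask(dp, 0);	/* 4: now mask the hardware    */
}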
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -513,6 +513,7 @@ struct sh_eth_private {
 	u32 rx_buf_sz;			/* Based on MTU+slack. */
 	int edmac_endian;
 	struct napi_struct napi;
+	bool irq_enabled;
 	/* MII transceiver section. */
 	u32 phy_id;			/* PHY ID */
 	struct mii_bus *mii_bus;	/* MDIO bus control */