Commit 29896a67 authored by Giuseppe CAVALLARO, committed by David S. Miller

stmmac: fix chained mode

This patch fixes the chain mode, which was broken and generated a panic.
It reworks the chain and ring modes so that they now share the same
operations structure, and takes care of the related pointers and callbacks.
Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent d916701c
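For context, here is a minimal userspace C sketch of the approach the patch takes: one struct of optional callbacks shared by ring and chain mode, a single hw->mode pointer selected at init time, and call sites that test each callback before invoking it. This is not the driver code itself; the struct members mirror the common.h hunk below, while the sketch_* stubs, the placeholder jumbo threshold, main(), and the use of unsigned long in place of dma_addr_t are assumptions made purely for illustration.

/* Sketch only: illustrates the unified mode-ops idea, not the stmmac driver. */
#include <stdio.h>

struct dma_desc { unsigned int des2, des3; };

struct stmmac_mode_ops {
	void (*init)(void *des, unsigned long phy_addr, unsigned int size,
		     unsigned int extend_desc);
	unsigned int (*is_jumbo_frm)(int len, int enh_desc);
	int (*set_16kib_bfsize)(int mtu);
	void (*init_desc3)(struct dma_desc *p);
	void (*refill_desc3)(void *priv, struct dma_desc *p);
	void (*clean_desc3)(void *priv, struct dma_desc *p);
};

/* Ring mode: no init() callback, only a jumbo check (placeholder threshold). */
static unsigned int sketch_ring_is_jumbo(int len, int enh_desc)
{
	return len > (enh_desc ? 8192 : 4096);
}

static const struct stmmac_mode_ops sketch_ring_ops = {
	.is_jumbo_frm = sketch_ring_is_jumbo,
};

/* Chain mode: provides init() to link the descriptors together. */
static void sketch_chain_init(void *des, unsigned long phy_addr,
			      unsigned int size, unsigned int extend_desc)
{
	printf("chain init: %u descriptors at 0x%lx\n", size, phy_addr);
}

static const struct stmmac_mode_ops sketch_chain_ops = {
	.init = sketch_chain_init,
	.is_jumbo_frm = sketch_ring_is_jumbo,	/* shared helper, for brevity */
};

int main(void)
{
	int chain_mode = 1;
	/* One pointer replaces the old hw->ring / hw->chain pair. */
	const struct stmmac_mode_ops *mode =
		chain_mode ? &sketch_chain_ops : &sketch_ring_ops;

	/* Call sites guard every optional callback before using it. */
	if (mode->init)
		mode->init(NULL, 0x1000, 256, 0);
	if (mode->is_jumbo_frm)
		printf("jumbo: %u\n", mode->is_jumbo_frm(9000, 1));
	return 0;
}

The diff below applies exactly this shape: chain_mode_ops and ring_mode_ops both become struct stmmac_mode_ops instances, and every former priv->hw->ring / priv->hw->chain access goes through priv->hw->mode.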
@@ -151,7 +151,7 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
 					  sizeof(struct dma_desc)));
 }
 
-const struct stmmac_chain_mode_ops chain_mode_ops = {
+const struct stmmac_mode_ops chain_mode_ops = {
 	.init = stmmac_init_dma_chain,
 	.is_jumbo_frm = stmmac_is_jumbo_frm,
 	.jumbo_frm = stmmac_jumbo_frm,
@@ -419,20 +419,13 @@ struct mii_regs {
 	unsigned int data; /* MII Data */
 };
 
-struct stmmac_ring_mode_ops {
-	unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
-	unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
-	void (*refill_desc3) (void *priv, struct dma_desc *p);
-	void (*init_desc3) (struct dma_desc *p);
-	void (*clean_desc3) (void *priv, struct dma_desc *p);
-	int (*set_16kib_bfsize) (int mtu);
-};
-
-struct stmmac_chain_mode_ops {
+struct stmmac_mode_ops {
 	void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
 		      unsigned int extend_desc);
 	unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
 	unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+	int (*set_16kib_bfsize)(int mtu);
+	void (*init_desc3)(struct dma_desc *p);
 	void (*refill_desc3) (void *priv, struct dma_desc *p);
 	void (*clean_desc3) (void *priv, struct dma_desc *p);
 };
@@ -441,8 +434,7 @@ struct mac_device_info {
 	const struct stmmac_ops *mac;
 	const struct stmmac_desc_ops *desc;
 	const struct stmmac_dma_ops *dma;
-	const struct stmmac_ring_mode_ops *ring;
-	const struct stmmac_chain_mode_ops *chain;
+	const struct stmmac_mode_ops *mode;
 	const struct stmmac_hwtimestamp *ptp;
 	struct mii_regs mii; /* MII register Addresses */
 	struct mac_link link;
@@ -460,7 +452,7 @@ void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
 void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 
-extern const struct stmmac_ring_mode_ops ring_mode_ops;
-extern const struct stmmac_chain_mode_ops chain_mode_ops;
+extern const struct stmmac_mode_ops ring_mode_ops;
+extern const struct stmmac_mode_ops chain_mode_ops;
 
 #endif /* __COMMON_H__ */
@@ -100,7 +100,6 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
 {
 	struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
 
-	if (unlikely(priv->plat->has_gmac))
 		/* Fill DES3 in case of RING mode */
 		if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
 			p->des3 = p->des2 + BUF_SIZE_8KiB;
@@ -126,7 +125,7 @@ static int stmmac_set_16kib_bfsize(int mtu)
 	return ret;
 }
 
-const struct stmmac_ring_mode_ops ring_mode_ops = {
+const struct stmmac_mode_ops ring_mode_ops = {
 	.is_jumbo_frm = stmmac_is_jumbo_frm,
 	.jumbo_frm = stmmac_jumbo_frm,
 	.refill_desc3 = stmmac_refill_desc3,
@@ -966,9 +966,9 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 	p->des2 = priv->rx_skbuff_dma[i];
 
-	if ((priv->mode == STMMAC_RING_MODE) &&
+	if ((priv->hw->mode->init_desc3) &&
 	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
-		priv->hw->ring->init_desc3(p);
+		priv->hw->mode->init_desc3(p);
 
 	return 0;
 }
@@ -999,11 +999,8 @@ static int init_dma_desc_rings(struct net_device *dev)
 	unsigned int bfsize = 0;
 	int ret = -ENOMEM;
 
-	/* Set the max buffer size according to the DESC mode
-	 * and the MTU. Note that RING mode allows 16KiB bsize.
-	 */
-	if (priv->mode == STMMAC_RING_MODE)
-		bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
+	if (priv->hw->mode->set_16kib_bfsize)
+		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
 
 	if (bfsize < BUF_SIZE_16KiB)
 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
@@ -1044,14 +1041,14 @@ static int init_dma_desc_rings(struct net_device *dev)
 	/* Setup the chained descriptor addresses */
 	if (priv->mode == STMMAC_CHAIN_MODE) {
 		if (priv->extend_desc) {
-			priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
+			priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
					      rxsize, 1);
-			priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
+			priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
					      txsize, 1);
 		} else {
-			priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
+			priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
					      rxsize, 0);
-			priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
+			priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
					      txsize, 0);
 		}
 	}
@@ -1303,7 +1300,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
					 DMA_TO_DEVICE);
 			priv->tx_skbuff_dma[entry] = 0;
 		}
-		priv->hw->ring->clean_desc3(priv, p);
+		priv->hw->mode->clean_desc3(priv, p);
 
 		if (likely(skb != NULL)) {
 			dev_kfree_skb(skb);
@@ -1859,6 +1856,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	struct dma_desc *desc, *first;
 	unsigned int nopaged_len = skb_headlen(skb);
+	unsigned int enh_desc = priv->plat->enh_desc;
 
 	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
 		if (!netif_queue_stopped(dev)) {
@@ -1886,27 +1884,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	first = desc;
 
 	/* To program the descriptors according to the size of the frame */
-	if (priv->mode == STMMAC_RING_MODE) {
-		is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
-							priv->plat->enh_desc);
-		if (unlikely(is_jumbo))
-			entry = priv->hw->ring->jumbo_frm(priv, skb,
-							  csum_insertion);
-	} else {
-		is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
-							 priv->plat->enh_desc);
-		if (unlikely(is_jumbo))
-			entry = priv->hw->chain->jumbo_frm(priv, skb,
-							   csum_insertion);
-	}
+	if (enh_desc)
+		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
 
 	if (likely(!is_jumbo)) {
 		desc->des2 = dma_map_single(priv->device, skb->data,
					    nopaged_len, DMA_TO_DEVICE);
 		priv->tx_skbuff_dma[entry] = desc->des2;
 		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
						csum_insertion, priv->mode);
-	} else
+	} else {
 		desc = first;
+		entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+	}
 
 	for (i = 0; i < nfrags; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2044,7 +2034,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 			p->des2 = priv->rx_skbuff_dma[entry];
 
-			priv->hw->ring->refill_desc3(priv, p);
+			priv->hw->mode->refill_desc3(priv, p);
 
 			if (netif_msg_rx_status(priv))
 				pr_debug("\trefill entry #%d\n", entry);
@@ -2648,11 +2638,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 	/* To use the chained or ring mode */
 	if (chain_mode) {
-		priv->hw->chain = &chain_mode_ops;
+		priv->hw->mode = &chain_mode_ops;
 		pr_info(" Chain mode enabled\n");
 		priv->mode = STMMAC_CHAIN_MODE;
 	} else {
-		priv->hw->ring = &ring_mode_ops;
+		priv->hw->mode = &ring_mode_ops;
 		pr_info(" Ring mode enabled\n");
 		priv->mode = STMMAC_RING_MODE;
 	}