Commit d6bf3143 authored by Russell King, committed by David S. Miller

net: fec: clean up transmit descriptor setup

Avoid writing any state until we're certain we can proceed with the
transmission: this avoids writing mapping error address values to the
descriptors, or setting the skbuff pointer until we have successfully
mapped the skb.
Acked-by: Fugang Duan <B38611@freescale.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 730ee360
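
In outline, each transmit path is converted from writing the DMA address straight into the descriptor to staging it in a local variable first. A minimal sketch of the pattern, condensed from the hunks below (the error return is simplified here; each path in the driver handles failure slightly differently, e.g. `goto dma_mapping_error` in the fragment path and `return NETDEV_TX_BUSY` in the TSO path):

        dma_addr_t addr;

        /* Map first, into a local; the descriptor is untouched so far. */
        addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
        if (dma_mapping_error(&fep->pdev->dev, addr)) {
                /* Failure leaves no error address value in the ring and
                 * no skb recorded in tx_skbuff[]. */
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;    /* simplified; see the hunks below */
        }

        /* Only now, with a known-good mapping, commit the state. */
        bdp->cbd_bufaddr = addr;
        bdp->cbd_datlen = buflen;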
@@ -373,6 +373,7 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
         skb_frag_t *this_frag;
         unsigned int index;
         void *bufaddr;
+        dma_addr_t addr;
         int i;
 
         for (frag = 0; frag < nr_frags; frag++) {
@@ -415,15 +416,16 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
                         swap_buffer(bufaddr, frag_len);
                 }
 
-                bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
-                                                  frag_len, DMA_TO_DEVICE);
-                if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+                addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
+                                      DMA_TO_DEVICE);
+                if (dma_mapping_error(&fep->pdev->dev, addr)) {
                         dev_kfree_skb_any(skb);
                         if (net_ratelimit())
                                 netdev_err(ndev, "Tx DMA memory map failed\n");
                         goto dma_mapping_error;
                 }
 
+                bdp->cbd_bufaddr = addr;
                 bdp->cbd_datlen = frag_len;
                 bdp->cbd_sc = status;
         }
@@ -450,6 +452,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
         int nr_frags = skb_shinfo(skb)->nr_frags;
         struct bufdesc *bdp, *last_bdp;
         void *bufaddr;
+        dma_addr_t addr;
         unsigned short status;
         unsigned short buflen;
         unsigned int estatus = 0;
@@ -490,12 +493,9 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
                 swap_buffer(bufaddr, buflen);
         }
 
-        /* Push the data cache so the CPM does not get stale memory
-         * data.
-         */
-        bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
-                        buflen, DMA_TO_DEVICE);
-        if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+        /* Push the data cache so the CPM does not get stale memory data. */
+        addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
+        if (dma_mapping_error(&fep->pdev->dev, addr)) {
                 dev_kfree_skb_any(skb);
                 if (net_ratelimit())
                         netdev_err(ndev, "Tx DMA memory map failed\n");
@@ -537,6 +537,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
         fep->tx_skbuff[index] = skb;
 
         bdp->cbd_datlen = buflen;
+        bdp->cbd_bufaddr = addr;
 
         /* Send it on its way.  Tell FEC it's ready, interrupt when done,
          * it's the last BD of the frame, and to put the CRC on the end.
@@ -570,12 +571,12 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
         struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
         unsigned short status;
         unsigned int estatus = 0;
+        dma_addr_t addr;
 
         status = bdp->cbd_sc;
         status &= ~BD_ENET_TX_STATS;
 
         status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
-        bdp->cbd_datlen = size;
 
         if (((unsigned long) data) & FEC_ALIGNMENT ||
             id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
@@ -586,15 +587,17 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
                 swap_buffer(data, size);
         }
 
-        bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
-                                          size, DMA_TO_DEVICE);
-        if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+        addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
+        if (dma_mapping_error(&fep->pdev->dev, addr)) {
                 dev_kfree_skb_any(skb);
                 if (net_ratelimit())
                         netdev_err(ndev, "Tx DMA memory map failed\n");
                 return NETDEV_TX_BUSY;
         }
 
+        bdp->cbd_datlen = size;
+        bdp->cbd_bufaddr = addr;
+
         if (fep->bufdesc_ex) {
                 if (skb->ip_summed == CHECKSUM_PARTIAL)
                         estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -801,7 +804,7 @@ static void fec_enet_bd_init(struct net_device *dev)
 
                 /* Initialize the BD for every fragment in the page. */
                 bdp->cbd_sc = 0;
-                if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
+                if (fep->tx_skbuff[i]) {
                         dev_kfree_skb_any(fep->tx_skbuff[i]);
                         fep->tx_skbuff[i] = NULL;
                 }
@@ -1100,6 +1103,7 @@ fec_enet_tx(struct net_device *ndev)
                 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
 
                 skb = fep->tx_skbuff[index];
+                fep->tx_skbuff[index] = NULL;
                 if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
                         dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                          bdp->cbd_datlen, DMA_TO_DEVICE);
@@ -1154,7 +1158,6 @@ fec_enet_tx(struct net_device *ndev)
 
                 /* Free the sk buffer associated with this last transmit */
                 dev_kfree_skb_any(skb);
-                fep->tx_skbuff[index] = NULL;
 
                 fep->dirty_tx = bdp;