Commit 5cfa3039 authored by Johannes Berg, committed by David S. Miller

net: fec: make driver endian-safe

The driver treats the device descriptors as CPU-endian, which appears
to be correct with the default endianness on both ARM (typically LE)
and PowerPC (typically BE) SoCs, indicating that the hardware block
is generated differently. Add endianness annotations and byteswaps as
necessary.

It's not clear that the ifdef there really is correct and shouldn't
just be #ifdef CONFIG_ARM, but I also can't test on anything but the
i.MX6 HummingBoard where this gets it working with a BE kernel.
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: David S. Miller <davem@davemloft.net>

Parent: db0e51af
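
The core of the patch is a pair of accessor families, fec16_to_cpu()/cpu_to_fec16() and fec32_to_cpu()/cpu_to_fec32(), that pin the descriptor fields to the byte order the FEC block was synthesized with, whatever the CPU's own endianness. As a rough illustration, here is a minimal userspace sketch of the same pattern, using glibc's <endian.h> helpers in place of the kernel's {le,be}{16,32}_to_cpu(); the FEC_IS_LITTLE_ENDIAN switch is a hypothetical stand-in for the CONFIG_ARCH_MXC/CONFIG_SOC_IMX28 ifdef and is not part of the patch.

/* Minimal userspace sketch of the annotation pattern (illustration only,
 * not kernel code). Descriptor fields keep the device's byte order in
 * memory; every CPU-side access goes through a conversion helper.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#ifdef FEC_IS_LITTLE_ENDIAN             /* stand-in for ARCH_MXC/SOC_IMX28 */
#define fec16_to_cpu(x) le16toh(x)
#define cpu_to_fec16(x) htole16(x)
#else                                   /* e.g. big-endian PowerPC parts */
#define fec16_to_cpu(x) be16toh(x)
#define cpu_to_fec16(x) htobe16(x)
#endif

typedef uint16_t __fec16;               /* device-endian storage type */

struct bufdesc {
    __fec16 cbd_sc;                     /* control/status, device endian */
    __fec16 cbd_datlen;                 /* data length, device endian */
};

int main(void)
{
    struct bufdesc bd = { .cbd_sc = cpu_to_fec16(0x8000) };

    /* Always convert on access; never use the raw field value. */
    printf("status = 0x%04x\n", (unsigned)fec16_to_cpu(bd.cbd_sc));
    return 0;
}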
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -4,6 +4,9 @@
 obj-$(CONFIG_FEC) += fec.o
 fec-objs :=fec_main.o fec_ptp.o
+CFLAGS_fec_main.o := -D__CHECK_ENDIAN__
+CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__
+
 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
 ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
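
The two CFLAGS lines arm sparse's endianness checking for exactly these two objects: with __CHECK_ENDIAN__ defined, the __le16/__be16-style types behind the new annotations become "bitwise" types, and any access that skips the conversion helpers is reported when the driver is built through sparse (for example "make C=2 drivers/net/ethernet/freescale/"; C=1 checks only files being recompiled, C=2 rechecks all of them — the invocation is a typical example, not taken from the patch).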
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -190,28 +190,46 @@
 /*
  * Define the buffer descriptor structure.
+ *
+ * Evidently, ARM SoCs have the FEC block generated in a
+ * little endian mode; or at least ARCH_MXC/SOC_IMX28 do,
+ * so adjust endianness accordingly.
  */
 #if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+#define fec32_to_cpu le32_to_cpu
+#define fec16_to_cpu le16_to_cpu
+#define cpu_to_fec32 cpu_to_le32
+#define cpu_to_fec16 cpu_to_le16
+#define __fec32 __le32
+#define __fec16 __le16
+
 struct bufdesc {
-    unsigned short cbd_datlen;  /* Data length */
-    unsigned short cbd_sc;      /* Control and status info */
-    unsigned long cbd_bufaddr;  /* Buffer address */
+    __fec16 cbd_datlen;         /* Data length */
+    __fec16 cbd_sc;             /* Control and status info */
+    __fec32 cbd_bufaddr;        /* Buffer address */
 };
 #else
+#define fec32_to_cpu be32_to_cpu
+#define fec16_to_cpu be16_to_cpu
+#define cpu_to_fec32 cpu_to_be32
+#define cpu_to_fec16 cpu_to_be16
+#define __fec32 __be32
+#define __fec16 __be16
+
 struct bufdesc {
-    unsigned short cbd_sc;      /* Control and status info */
-    unsigned short cbd_datlen;  /* Data length */
-    unsigned long cbd_bufaddr;  /* Buffer address */
+    __fec16 cbd_sc;             /* Control and status info */
+    __fec16 cbd_datlen;         /* Data length */
+    __fec32 cbd_bufaddr;        /* Buffer address */
 };
 #endif
 
 struct bufdesc_ex {
     struct bufdesc desc;
-    unsigned long cbd_esc;
-    unsigned long cbd_prot;
-    unsigned long cbd_bdu;
-    unsigned long ts;
-    unsigned short res0[4];
+    __fec32 cbd_esc;
+    __fec32 cbd_prot;
+    __fec32 cbd_bdu;
+    __fec32 ts;
+    __fec16 res0[4];
 };
 
 /*
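
To see what the checker catches: under __CHECK_ENDIAN__ the kernel declares these underlying types as sparse "bitwise" types, so device-endian values no longer silently mix with plain integers. A sketch of the mechanism (the typedef mirrors include/uapi/linux/types.h; the warning text is representative of sparse's output, not copied from a build log):

typedef __u16 __bitwise __le16;         /* sparse-checked kernel type */

u16 bad = bdp->cbd_sc;                  /* sparse: incorrect type in
                                         * assignment (different base types) */
__fec16 raw = bdp->cbd_sc;              /* fine: stays device-endian */
u16 good = fec16_to_cpu(bdp->cbd_sc);   /* fine: explicit conversion */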
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -332,11 +332,13 @@ static void fec_dump(struct net_device *ndev)
     bdp = txq->tx_bd_base;
 
     do {
-        pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
+        pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
             index,
             bdp == txq->cur_tx ? 'S' : ' ',
             bdp == txq->dirty_tx ? 'H' : ' ',
-            bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
+            fec16_to_cpu(bdp->cbd_sc),
+            fec32_to_cpu(bdp->cbd_bufaddr),
+            fec16_to_cpu(bdp->cbd_datlen),
             txq->tx_skbuff[index]);
         bdp = fec_enet_get_nextdesc(bdp, fep, 0);
         index++;
@@ -389,7 +391,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
         bdp = fec_enet_get_nextdesc(bdp, fep, queue);
         ebdp = (struct bufdesc_ex *)bdp;
 
-        status = bdp->cbd_sc;
+        status = fec16_to_cpu(bdp->cbd_sc);
         status &= ~BD_ENET_TX_STATS;
         status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
         frag_len = skb_shinfo(skb)->frags[frag].size;
@@ -411,7 +413,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
             if (skb->ip_summed == CHECKSUM_PARTIAL)
                 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
             ebdp->cbd_bdu = 0;
-            ebdp->cbd_esc = estatus;
+            ebdp->cbd_esc = cpu_to_fec32(estatus);
         }
 
         bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
@@ -435,9 +437,9 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
             goto dma_mapping_error;
         }
 
-        bdp->cbd_bufaddr = addr;
-        bdp->cbd_datlen = frag_len;
-        bdp->cbd_sc = status;
+        bdp->cbd_bufaddr = cpu_to_fec32(addr);
+        bdp->cbd_datlen = cpu_to_fec16(frag_len);
+        bdp->cbd_sc = cpu_to_fec16(status);
     }
 
     return bdp;
@@ -445,8 +447,8 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
     bdp = txq->cur_tx;
     for (i = 0; i < frag; i++) {
         bdp = fec_enet_get_nextdesc(bdp, fep, queue);
-        dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-                bdp->cbd_datlen, DMA_TO_DEVICE);
+        dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
+                 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
     }
     return ERR_PTR(-ENOMEM);
 }
@@ -483,7 +485,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
     /* Fill in a Tx ring entry */
     bdp = txq->cur_tx;
     last_bdp = bdp;
-    status = bdp->cbd_sc;
+    status = fec16_to_cpu(bdp->cbd_sc);
     status &= ~BD_ENET_TX_STATS;
 
     /* Set buffer length and buffer pointer */
@@ -539,21 +541,21 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
             estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
 
         ebdp->cbd_bdu = 0;
-        ebdp->cbd_esc = estatus;
+        ebdp->cbd_esc = cpu_to_fec32(estatus);
     }
 
     index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
     /* Save skb pointer */
     txq->tx_skbuff[index] = skb;
 
-    bdp->cbd_datlen = buflen;
-    bdp->cbd_bufaddr = addr;
+    bdp->cbd_datlen = cpu_to_fec16(buflen);
+    bdp->cbd_bufaddr = cpu_to_fec32(addr);
 
     /* Send it on its way.  Tell FEC it's ready, interrupt when done,
      * it's the last BD of the frame, and to put the CRC on the end.
      */
     status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
-    bdp->cbd_sc = status;
+    bdp->cbd_sc = cpu_to_fec16(status);
 
     /* If this was the last BD in the ring, start at the beginning again. */
     bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
@@ -585,7 +587,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
     unsigned int estatus = 0;
     dma_addr_t addr;
 
-    status = bdp->cbd_sc;
+    status = fec16_to_cpu(bdp->cbd_sc);
     status &= ~BD_ENET_TX_STATS;
 
     status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
@@ -607,8 +609,8 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
         return NETDEV_TX_BUSY;
     }
 
-    bdp->cbd_datlen = size;
-    bdp->cbd_bufaddr = addr;
+    bdp->cbd_datlen = cpu_to_fec16(size);
+    bdp->cbd_bufaddr = cpu_to_fec32(addr);
 
     if (fep->bufdesc_ex) {
         if (fep->quirks & FEC_QUIRK_HAS_AVB)
@@ -616,7 +618,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
         if (skb->ip_summed == CHECKSUM_PARTIAL)
             estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
         ebdp->cbd_bdu = 0;
-        ebdp->cbd_esc = estatus;
+        ebdp->cbd_esc = cpu_to_fec32(estatus);
     }
 
     /* Handle the last BD specially */
@@ -625,10 +627,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
     if (is_last) {
         status |= BD_ENET_TX_INTR;
         if (fep->bufdesc_ex)
-            ebdp->cbd_esc |= BD_ENET_TX_INT;
+            ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
     }
 
-    bdp->cbd_sc = status;
+    bdp->cbd_sc = cpu_to_fec16(status);
 
     return 0;
 }
@@ -647,7 +649,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
     unsigned short status;
     unsigned int estatus = 0;
 
-    status = bdp->cbd_sc;
+    status = fec16_to_cpu(bdp->cbd_sc);
     status &= ~BD_ENET_TX_STATS;
     status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
@@ -671,8 +673,8 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
         }
     }
 
-    bdp->cbd_bufaddr = dmabuf;
-    bdp->cbd_datlen = hdr_len;
+    bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
+    bdp->cbd_datlen = cpu_to_fec16(hdr_len);
 
     if (fep->bufdesc_ex) {
         if (fep->quirks & FEC_QUIRK_HAS_AVB)
@@ -680,10 +682,10 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
         if (skb->ip_summed == CHECKSUM_PARTIAL)
             estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
         ebdp->cbd_bdu = 0;
-        ebdp->cbd_esc = estatus;
+        ebdp->cbd_esc = cpu_to_fec32(estatus);
     }
 
-    bdp->cbd_sc = status;
+    bdp->cbd_sc = cpu_to_fec16(status);
 
     return 0;
 }
@@ -823,15 +825,15 @@ static void fec_enet_bd_init(struct net_device *dev)
 
             /* Initialize the BD for every fragment in the page. */
             if (bdp->cbd_bufaddr)
-                bdp->cbd_sc = BD_ENET_RX_EMPTY;
+                bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
             else
-                bdp->cbd_sc = 0;
+                bdp->cbd_sc = cpu_to_fec16(0);
             bdp = fec_enet_get_nextdesc(bdp, fep, q);
         }
 
         /* Set the last buffer to wrap */
         bdp = fec_enet_get_prevdesc(bdp, fep, q);
-        bdp->cbd_sc |= BD_SC_WRAP;
+        bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
 
         rxq->cur_rx = rxq->rx_bd_base;
     }
@@ -844,18 +846,18 @@ static void fec_enet_bd_init(struct net_device *dev)
 
         for (i = 0; i < txq->tx_ring_size; i++) {
             /* Initialize the BD for every fragment in the page. */
-            bdp->cbd_sc = 0;
+            bdp->cbd_sc = cpu_to_fec16(0);
             if (txq->tx_skbuff[i]) {
                 dev_kfree_skb_any(txq->tx_skbuff[i]);
                 txq->tx_skbuff[i] = NULL;
             }
-            bdp->cbd_bufaddr = 0;
+            bdp->cbd_bufaddr = cpu_to_fec32(0);
             bdp = fec_enet_get_nextdesc(bdp, fep, q);
         }
 
         /* Set the last buffer to wrap */
         bdp = fec_enet_get_prevdesc(bdp, fep, q);
-        bdp->cbd_sc |= BD_SC_WRAP;
+        bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
         txq->dirty_tx = bdp;
     }
 }
@@ -947,8 +949,10 @@ fec_restart(struct net_device *ndev)
      */
     if (fep->quirks & FEC_QUIRK_ENET_MAC) {
         memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
-        writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
-        writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+        writel((__force u32)cpu_to_be32(temp_mac[0]),
+               fep->hwp + FEC_ADDR_LOW);
+        writel((__force u32)cpu_to_be32(temp_mac[1]),
+               fep->hwp + FEC_ADDR_HIGH);
     }
 
     /* Clear any outstanding interrupt. */
@@ -1222,7 +1226,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
     while (bdp != READ_ONCE(txq->cur_tx)) {
         /* Order the load of cur_tx and cbd_sc */
         rmb();
-        status = READ_ONCE(bdp->cbd_sc);
+        status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
         if (status & BD_ENET_TX_READY)
             break;
@@ -1230,10 +1234,12 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
         skb = txq->tx_skbuff[index];
         txq->tx_skbuff[index] = NULL;
-        if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
-            dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-                    bdp->cbd_datlen, DMA_TO_DEVICE);
-        bdp->cbd_bufaddr = 0;
+        if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+            dma_unmap_single(&fep->pdev->dev,
+                     fec32_to_cpu(bdp->cbd_bufaddr),
+                     fec16_to_cpu(bdp->cbd_datlen),
+                     DMA_TO_DEVICE);
+        bdp->cbd_bufaddr = cpu_to_fec32(0);
         if (!skb) {
             bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
             continue;
@@ -1264,7 +1270,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
             struct skb_shared_hwtstamps shhwtstamps;
             struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 
-            fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
+            fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
             skb_tstamp_tx(skb, &shhwtstamps);
         }
@@ -1324,10 +1330,8 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
     if (off)
         skb_reserve(skb, fep->rx_align + 1 - off);
 
-    bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
-                      FEC_ENET_RX_FRSIZE - fep->rx_align,
-                      DMA_FROM_DEVICE);
-    if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+    bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
+    if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
         if (net_ratelimit())
             netdev_err(ndev, "Rx DMA memory map failed\n");
         return -ENOMEM;
@@ -1349,7 +1353,8 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
     if (!new_skb)
         return false;
 
-    dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
+    dma_sync_single_for_cpu(&fep->pdev->dev,
+                fec32_to_cpu(bdp->cbd_bufaddr),
                 FEC_ENET_RX_FRSIZE - fep->rx_align,
                 DMA_FROM_DEVICE);
     if (!swap)
@@ -1396,7 +1401,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
      */
     bdp = rxq->cur_rx;
 
-    while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+    while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
 
         if (pkt_received >= budget)
             break;
@@ -1438,7 +1443,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 
         /* Process the incoming frame. */
         ndev->stats.rx_packets++;
-        pkt_len = bdp->cbd_datlen;
+        pkt_len = fec16_to_cpu(bdp->cbd_datlen);
         ndev->stats.rx_bytes += pkt_len;
 
         index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
@@ -1456,7 +1461,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                 ndev->stats.rx_dropped++;
                 goto rx_processing_done;
             }
-            dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+            dma_unmap_single(&fep->pdev->dev,
+                     fec32_to_cpu(bdp->cbd_bufaddr),
                      FEC_ENET_RX_FRSIZE - fep->rx_align,
                      DMA_FROM_DEVICE);
         }
@@ -1475,7 +1481,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
         /* If this is a VLAN packet remove the VLAN Tag */
         vlan_packet_rcvd = false;
         if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
-            fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
+            fep->bufdesc_ex &&
+            (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
             /* Push and remove the vlan tag */
             struct vlan_hdr *vlan_header =
                     (struct vlan_hdr *) (data + ETH_HLEN);
@@ -1491,12 +1498,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 
         /* Get receive timestamp from the skb */
         if (fep->hwts_rx_en && fep->bufdesc_ex)
-            fec_enet_hwtstamp(fep, ebdp->ts,
+            fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
                       skb_hwtstamps(skb));
 
         if (fep->bufdesc_ex &&
             (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
-            if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
+            if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
                 /* don't check it */
                 skb->ip_summed = CHECKSUM_UNNECESSARY;
             } else {
@@ -1513,7 +1520,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
         napi_gro_receive(&fep->napi, skb);
 
         if (is_copybreak) {
-            dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
+            dma_sync_single_for_device(&fep->pdev->dev,
+                           fec32_to_cpu(bdp->cbd_bufaddr),
                            FEC_ENET_RX_FRSIZE - fep->rx_align,
                            DMA_FROM_DEVICE);
         } else {
@@ -1527,12 +1535,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 
         /* Mark the buffer empty */
         status |= BD_ENET_RX_EMPTY;
-        bdp->cbd_sc = status;
+        bdp->cbd_sc = cpu_to_fec16(status);
 
         if (fep->bufdesc_ex) {
             struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 
-            ebdp->cbd_esc = BD_ENET_RX_INT;
+            ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
             ebdp->cbd_prot = 0;
             ebdp->cbd_bdu = 0;
         }
@@ -2662,7 +2670,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
             rxq->rx_skbuff[i] = NULL;
             if (skb) {
                 dma_unmap_single(&fep->pdev->dev,
-                         bdp->cbd_bufaddr,
+                         fec32_to_cpu(bdp->cbd_bufaddr),
                          FEC_ENET_RX_FRSIZE - fep->rx_align,
                          DMA_FROM_DEVICE);
                 dev_kfree_skb(skb);
@@ -2777,11 +2785,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
         }
 
         rxq->rx_skbuff[i] = skb;
-        bdp->cbd_sc = BD_ENET_RX_EMPTY;
+        bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
 
         if (fep->bufdesc_ex) {
             struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-            ebdp->cbd_esc = BD_ENET_RX_INT;
+            ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
         }
 
         bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@@ -2789,7 +2797,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
     /* Set the last buffer to wrap. */
     bdp = fec_enet_get_prevdesc(bdp, fep, queue);
-    bdp->cbd_sc |= BD_SC_WRAP;
+    bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
 
     return 0;
 
 err_alloc:
@@ -2812,12 +2820,12 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
         if (!txq->tx_bounce[i])
             goto err_alloc;
 
-        bdp->cbd_sc = 0;
-        bdp->cbd_bufaddr = 0;
+        bdp->cbd_sc = cpu_to_fec16(0);
+        bdp->cbd_bufaddr = cpu_to_fec32(0);
 
         if (fep->bufdesc_ex) {
             struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-            ebdp->cbd_esc = BD_ENET_TX_INT;
+            ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
         }
 
         bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@@ -2825,7 +2833,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
     /* Set the last buffer to wrap. */
     bdp = fec_enet_get_prevdesc(bdp, fep, queue);
-    bdp->cbd_sc |= BD_SC_WRAP;
+    bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
 
     return 0;