Commit 55c82617 authored by Christoph Hellwig, committed by David S. Miller

3c59x: convert to generic DMA API

This driver supports EISA devices in addition to PCI devices, and relied
on the legacy behavior of the pci_dma* shims passing a NULL pointer on
to the DMA API, and on the DMA API being able to handle that.  When the
NULL forwarding was removed, the EISA support broke.  Fix this by
converting to the generic DMA API instead of the legacy PCI shims.

Fixes: 4167b2ad ("PCI: Remove NULL device handling from PCI DMA API")
Reported-by: tedheadster <tedheadster@gmail.com>
Tested-by: tedheadster <tedheadster@gmail.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent b84bbaf7
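For reference, every hunk below applies the same mechanical substitution: the `pci_*` DMA wrappers require a `struct pci_dev *`, which the EISA variants of these cards do not have, while the generic DMA API takes a plain `struct device *` (the driver's `vp->gendev`, or `&pdev->dev` where a PCI device is known to be present). A minimal sketch of the pattern, with illustrative helper names (the `example_*` functions are not part of the driver):

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* was: buf = pci_alloc_consistent(pdev, size, handle); */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *handle)
{
	/* The generic API adds an explicit gfp_t argument. */
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

/* was: pci_free_consistent(pdev, size, ring, handle); */
static void example_free_ring(struct device *dev, size_t size,
			      void *ring, dma_addr_t handle)
{
	dma_free_coherent(dev, size, ring, handle);
}

/* was: dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE); */
static int example_map_tx(struct device *dev, void *buf, size_t len,
			  dma_addr_t *dma)
{
	*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* dma_mapping_error() likewise takes the generic struct device. */
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;	/* caller drops the packet */
	return 0;
}
```

The diff applies this substitution call site by call site, with `PCI_DMA_TODEVICE`/`PCI_DMA_FROMDEVICE` becoming `DMA_TO_DEVICE`/`DMA_FROM_DEVICE`.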
@@ -1212,9 +1212,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
 	vp->mii.reg_num_mask = 0x1f;
 
 	/* Makes sure rings are at least 16 byte aligned. */
-	vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
+	vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
					   + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-					   &vp->rx_ring_dma);
+					   &vp->rx_ring_dma, GFP_KERNEL);
 	retval = -ENOMEM;
 	if (!vp->rx_ring)
 		goto free_device;
@@ -1476,11 +1476,10 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
 	return 0;
 
 free_ring:
-	pci_free_consistent(pdev,
-			    sizeof(struct boom_rx_desc) * RX_RING_SIZE
-			    + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-			    vp->rx_ring,
-			    vp->rx_ring_dma);
+	dma_free_coherent(&pdev->dev,
+			sizeof(struct boom_rx_desc) * RX_RING_SIZE +
+			sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+			vp->rx_ring, vp->rx_ring_dma);
 free_device:
 	free_netdev(dev);
 	pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
@@ -1751,9 +1750,9 @@ vortex_open(struct net_device *dev)
 			break;			/* Bad news! */
 
 		skb_reserve(skb, NET_IP_ALIGN);	/* Align IP on 16 byte boundaries */
-		dma = pci_map_single(VORTEX_PCI(vp), skb->data,
-				     PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
+		dma = dma_map_single(vp->gendev, skb->data,
+				     PKT_BUF_SZ, DMA_FROM_DEVICE);
+		if (dma_mapping_error(vp->gendev, dma))
 			break;
 		vp->rx_ring[i].addr = cpu_to_le32(dma);
 	}
@@ -2067,9 +2066,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (vp->bus_master) {
 		/* Set the bus-master controller to transfer the packet. */
 		int len = (skb->len + 3) & ~3;
-		vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
-						PCI_DMA_TODEVICE);
-		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
+		vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
 			dev_kfree_skb_any(skb);
 			dev->stats.tx_dropped++;
 			return NETDEV_TX_OK;
@@ -2168,9 +2167,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
 
 	if (!skb_shinfo(skb)->nr_frags) {
-		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
-					  PCI_DMA_TODEVICE);
-		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+		dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(vp->gendev, dma_addr))
 			goto out_dma_err;
 
 		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2178,9 +2177,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else {
 		int i;
 
-		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
-					  skb_headlen(skb), PCI_DMA_TODEVICE);
-		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+		dma_addr = dma_map_single(vp->gendev, skb->data,
+					  skb_headlen(skb), DMA_TO_DEVICE);
+		if (dma_mapping_error(vp->gendev, dma_addr))
 			goto out_dma_err;
 
 		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2189,21 +2188,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-			dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+			dma_addr = skb_frag_dma_map(vp->gendev, frag,
						    0,
						    frag->size,
						    DMA_TO_DEVICE);
-			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+			if (dma_mapping_error(vp->gendev, dma_addr)) {
 				for(i = i-1; i >= 0; i--)
-					dma_unmap_page(&VORTEX_PCI(vp)->dev,
+					dma_unmap_page(vp->gendev,
						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
						       DMA_TO_DEVICE);
 
-				pci_unmap_single(VORTEX_PCI(vp),
+				dma_unmap_single(vp->gendev,
						 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
						 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
-						 PCI_DMA_TODEVICE);
+						 DMA_TO_DEVICE);
 
 				goto out_dma_err;
 			}
@@ -2218,8 +2217,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 #else
-	dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
-	if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+	dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(vp->gendev, dma_addr))
 		goto out_dma_err;
 	vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
 	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
@@ -2254,7 +2253,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 out:
 	return NETDEV_TX_OK;
 out_dma_err:
-	dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+	dev_err(vp->gendev, "Error mapping dma buffer\n");
 	goto out;
 }
@@ -2322,7 +2321,7 @@ vortex_interrupt(int irq, void *dev_id)
 		if (status & DMADone) {
 			if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
 				iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
-				pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+				dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
 				pkts_compl++;
 				bytes_compl += vp->tx_skb->len;
 				dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
@@ -2459,19 +2458,19 @@ boomerang_interrupt(int irq, void *dev_id)
 			struct sk_buff *skb = vp->tx_skbuff[entry];
 #if DO_ZEROCOPY
 			int i;
-			pci_unmap_single(VORTEX_PCI(vp),
+			dma_unmap_single(vp->gendev,
					le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
					le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
-					PCI_DMA_TODEVICE);
+					DMA_TO_DEVICE);
 
 			for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
-					pci_unmap_page(VORTEX_PCI(vp),
+					dma_unmap_page(vp->gendev,
							le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
							le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
-							PCI_DMA_TODEVICE);
+							DMA_TO_DEVICE);
 #else
-			pci_unmap_single(VORTEX_PCI(vp),
-				le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
+			dma_unmap_single(vp->gendev,
+				le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
 #endif
 			pkts_compl++;
 			bytes_compl += skb->len;
@@ -2561,14 +2560,14 @@ static int vortex_rx(struct net_device *dev)
 				/* 'skb_put()' points to the start of sk_buff data area. */
 				if (vp->bus_master &&
 					! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
-					dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
-									   pkt_len, PCI_DMA_FROMDEVICE);
+					dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
+									   pkt_len, DMA_FROM_DEVICE);
					iowrite32(dma, ioaddr + Wn7_MasterAddr);
					iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
					iowrite16(StartDMAUp, ioaddr + EL3_CMD);
					while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
						;
-					pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
+					dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
 				} else {
					ioread32_rep(ioaddr + RX_FIFO,
						     skb_put(skb, pkt_len),
@@ -2635,11 +2634,11 @@ boomerang_rx(struct net_device *dev)
 		if (pkt_len < rx_copybreak &&
 		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
-			pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
 			/* 'skb_put()' points to the start of sk_buff data area. */
 			skb_put_data(skb, vp->rx_skbuff[entry]->data,
				     pkt_len);
-			pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
 			vp->rx_copy++;
 		} else {
 			/* Pre-allocate the replacement skb. If it or its
@@ -2651,9 +2650,9 @@ boomerang_rx(struct net_device *dev)
 				dev->stats.rx_dropped++;
 				goto clear_complete;
 			}
-			newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
-						PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
+			newdma = dma_map_single(vp->gendev, newskb->data,
+						PKT_BUF_SZ, DMA_FROM_DEVICE);
+			if (dma_mapping_error(vp->gendev, newdma)) {
 				dev->stats.rx_dropped++;
 				consume_skb(newskb);
 				goto clear_complete;
@@ -2664,7 +2663,7 @@ boomerang_rx(struct net_device *dev)
 			vp->rx_skbuff[entry] = newskb;
 			vp->rx_ring[entry].addr = cpu_to_le32(newdma);
 			skb_put(skb, pkt_len);
-			pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
 			vp->rx_nocopy++;
 		}
 		skb->protocol = eth_type_trans(skb, dev);
@@ -2761,8 +2760,8 @@ vortex_close(struct net_device *dev)
 	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
 		for (i = 0; i < RX_RING_SIZE; i++)
 			if (vp->rx_skbuff[i]) {
-				pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
-							PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
+							PKT_BUF_SZ, DMA_FROM_DEVICE);
 				dev_kfree_skb(vp->rx_skbuff[i]);
 				vp->rx_skbuff[i] = NULL;
 			}
@@ -2775,12 +2774,12 @@ vortex_close(struct net_device *dev)
 			int k;
 
 			for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
-					pci_unmap_single(VORTEX_PCI(vp),
+					dma_unmap_single(vp->gendev,
							le32_to_cpu(vp->tx_ring[i].frag[k].addr),
							le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
-							PCI_DMA_TODEVICE);
+							DMA_TO_DEVICE);
 #else
-			pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
+			dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
 #endif
 			dev_kfree_skb(skb);
 			vp->tx_skbuff[i] = NULL;
@@ -3288,11 +3287,10 @@ static void vortex_remove_one(struct pci_dev *pdev)
 	pci_iounmap(pdev, vp->ioaddr);
 
-	pci_free_consistent(pdev,
-			    sizeof(struct boom_rx_desc) * RX_RING_SIZE
-			    + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-			    vp->rx_ring,
-			    vp->rx_ring_dma);
+	dma_free_coherent(&pdev->dev,
+			sizeof(struct boom_rx_desc) * RX_RING_SIZE +
+			sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+			vp->rx_ring, vp->rx_ring_dma);
 
 	pci_release_regions(pdev);