Commit e2c41f14 authored by Tony Prisk, committed by David S. Miller

net: velocity: Convert to generic dma functions

Remove the pci_* DMA functions and replace them with the more generic
versions.

In preparation for adding platform support, a new struct device *dev
is added to struct velocity_info, which can be used by both the PCI
and the platform code.
Signed-off-by: Tony Prisk <linux@prisktech.co.nz>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent a9683c94
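Before the diff, a brief orientation for reviewers: the conversion is mechanical, since each pci_* wrapper was already a thin veneer over the generic DMA API. The sketch below is illustrative only and not part of the patch; velocity_demo_alloc() is a hypothetical helper, and the one behavioural subtlety is that pci_alloc_consistent() implied GFP_ATOMIC, which the converted code now passes explicitly.

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/pci.h>

    /*
     * Correspondence used throughout this patch (with dev == &pdev->dev):
     *
     *   pci_alloc_consistent(pdev, sz, &h)     -> dma_alloc_coherent(dev, sz, &h, GFP_ATOMIC)
     *   pci_free_consistent(pdev, sz, p, h)    -> dma_free_coherent(dev, sz, p, h)
     *   pci_map_single(pdev, p, sz, dir)       -> dma_map_single(dev, p, sz, dir)
     *   pci_unmap_single(pdev, h, sz, dir)     -> dma_unmap_single(dev, h, sz, dir)
     *   pci_dma_sync_single_for_cpu(...)       -> dma_sync_single_for_cpu(...)
     *   PCI_DMA_FROMDEVICE / PCI_DMA_TODEVICE  -> DMA_FROM_DEVICE / DMA_TO_DEVICE
     */

    /* Hypothetical helper mirroring velocity_init_dma_rings() below: the
     * generic call takes a struct device * and an explicit gfp_t. */
    static void *velocity_demo_alloc(struct device *dev, size_t ring_bytes,
                                     dma_addr_t *pool_dma)
    {
            return dma_alloc_coherent(dev, ring_bytes, pool_dma, GFP_ATOMIC);
    }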
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -46,6 +46,7 @@
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
+#include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
@@ -1459,7 +1460,6 @@ static int velocity_init_dma_rings(struct velocity_info *vptr)
 	struct velocity_opt *opt = &vptr->options;
 	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
 	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
-	struct pci_dev *pdev = vptr->pdev;
 	dma_addr_t pool_dma;
 	void *pool;
 	unsigned int i;
@@ -1467,13 +1467,13 @@ static int velocity_init_dma_rings(struct velocity_info *vptr)
 	/*
 	 * Allocate all RD/TD rings a single pool.
 	 *
-	 * pci_alloc_consistent() fulfills the requirement for 64 bytes
+	 * dma_alloc_coherent() fulfills the requirement for 64 bytes
 	 * alignment
 	 */
-	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
-			rx_ring_size, &pool_dma);
+	pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
+			rx_ring_size, &pool_dma, GFP_ATOMIC);
 	if (!pool) {
-		dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
+		dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
 			vptr->netdev->name);
 		return -ENOMEM;
 	}
@@ -1524,8 +1524,8 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	 */
 	skb_reserve(rd_info->skb,
 		    64 - ((unsigned long) rd_info->skb->data & 63));
-	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
-					  vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
+	rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
+					  vptr->rx.buf_sz, DMA_FROM_DEVICE);

 	/*
 	 * Fill in the descriptor to match
@@ -1588,8 +1588,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 		if (!rd_info->skb)
 			continue;

-		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
-				 PCI_DMA_FROMDEVICE);
+		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
+				 DMA_FROM_DEVICE);
 		rd_info->skb_dma = 0;

 		dev_kfree_skb(rd_info->skb);
@@ -1670,7 +1670,7 @@ static void velocity_free_dma_rings(struct velocity_info *vptr)
 	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
 		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;

-	pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
+	dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
 }

 static int velocity_init_rings(struct velocity_info *vptr, int mtu)
@@ -1727,8 +1727,8 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
 				pktlen = max_t(size_t, pktlen,
 						td->td_buf[i].size & ~TD_QUEUE);

-			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
-					le16_to_cpu(pktlen), PCI_DMA_TODEVICE);
+			dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
+					 le16_to_cpu(pktlen), DMA_TO_DEVICE);
 		}
 	}
 	dev_kfree_skb_irq(skb);
@@ -1750,8 +1750,8 @@ static void velocity_free_td_ring_entry(struct velocity_info *vptr,
 	if (td_info->skb) {
 		for (i = 0; i < td_info->nskb_dma; i++) {
 			if (td_info->skb_dma[i]) {
-				pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
-					td_info->skb->len, PCI_DMA_TODEVICE);
+				dma_unmap_single(vptr->dev, td_info->skb_dma[i],
+						 td_info->skb->len, DMA_TO_DEVICE);
 				td_info->skb_dma[i] = 0;
 			}
 		}
@@ -2029,7 +2029,6 @@ static inline void velocity_iph_realign(struct velocity_info *vptr,
  */
 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 {
-	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
 	struct net_device_stats *stats = &vptr->netdev->stats;
 	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
 	struct rx_desc *rd = &(vptr->rx.ring[idx]);
@@ -2047,8 +2046,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)

 	skb = rd_info->skb;

-	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
-				    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
+				vptr->rx.buf_sz, DMA_FROM_DEVICE);

 	/*
 	 * Drop frame not meeting IEEE 802.3
@@ -2061,19 +2060,18 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 		}
 	}

-	pci_action = pci_dma_sync_single_for_device;
-
 	velocity_rx_csum(rd, skb);

 	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
 		velocity_iph_realign(vptr, skb, pkt_len);
-		pci_action = pci_unmap_single;
 		rd_info->skb = NULL;
+		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
+				 DMA_FROM_DEVICE);
+	} else {
+		dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
+					   vptr->rx.buf_sz, DMA_FROM_DEVICE);
 	}

-	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
-		   PCI_DMA_FROMDEVICE);
-
 	skb_put(skb, pkt_len - 4);
 	skb->protocol = eth_type_trans(skb, vptr->netdev);
@@ -2550,7 +2548,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 	 * add it to the transmit ring.
 	 */
 	tdinfo->skb = skb;
-	tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
+	tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
+					    DMA_TO_DEVICE);
 	td_ptr->tdesc0.len = cpu_to_le16(pktlen);
 	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
 	td_ptr->td_buf[0].pa_high = 0;
@@ -2560,7 +2559,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

-		tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev,
+		tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
							  frag, 0,
							  skb_frag_size(frag),
							  DMA_TO_DEVICE);
@@ -2637,6 +2636,7 @@ static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
 {
 	memset(vptr, 0, sizeof(struct velocity_info));

+	vptr->dev = &pdev->dev;
 	vptr->pdev = pdev;
 	vptr->chip_id = info->chip_id;
 	vptr->tx.numq = info->txqueue;
@@ -2744,7 +2744,6 @@ static int velocity_found1(struct pci_dev *pdev,
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	vptr = netdev_priv(dev);

-
 	if (first) {
 		printk(KERN_INFO "%s Ver. %s\n",
 			VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
......
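One structural change above goes beyond renaming: velocity_receive_frame() loses its pci_action function pointer instead of having it converted. A plausible reason, offered here as reviewer context rather than as the author's stated rationale: the generic dma_unmap_single() is a macro over dma_unmap_single_attrs() in <linux/dma-mapping.h>, so unlike the out-of-line pci_* wrappers its address cannot be taken, and the two cases have to be open-coded.

    /* Old pattern (removed above): both pci_* helpers were addressable
     * functions sharing one signature, so a pointer could defer the
     * choice between "sync buffer back to the device" and "unmap":
     *
     *     void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
     *
     *     pci_action = pci_dma_sync_single_for_device;
     *     if (velocity_rx_copy(&skb, pkt_len, vptr) < 0)
     *             pci_action = pci_unmap_single;
     *     pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
     *                PCI_DMA_FROMDEVICE);
     *
     * No equivalent function-pointer type exists for the generic API,
     * hence the explicit if/else in the converted code.
     */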
--- a/drivers/net/ethernet/via/via-velocity.h
+++ b/drivers/net/ethernet/via/via-velocity.h
@@ -1434,6 +1434,7 @@ struct velocity_opt {
 #define GET_RD_BY_IDX(vptr, idx)   (vptr->rd_ring[idx])

 struct velocity_info {
+	struct device *dev;
 	struct pci_dev *pdev;
 	struct net_device *netdev;
......
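The new struct device *dev field is the hook for the platform conversion promised in the commit message. Below is a minimal sketch of what a later platform probe might look like; velocity_platform_probe() and everything beyond the device-pointer wiring are assumptions for illustration, not part of this patch.

    #include <linux/etherdevice.h>
    #include <linux/platform_device.h>

    static int velocity_platform_probe(struct platform_device *pdev)
    {
            struct net_device *netdev;
            struct velocity_info *vptr;

            netdev = alloc_etherdev(sizeof(struct velocity_info));
            if (!netdev)
                    return -ENOMEM;

            vptr = netdev_priv(netdev);
            vptr->netdev = netdev;
            /* Every DMA call converted above goes through vptr->dev, so a
             * platform device can be wired in without a pci_dev at all;
             * vptr->pdev simply stays NULL on non-PCI systems. */
            vptr->dev = &pdev->dev;
            SET_NETDEV_DEV(netdev, &pdev->dev);

            /* ... register mapping, MII and ring setup elided ... */
            return register_netdev(netdev);
    }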