Commit 1a983142 authored by FUJITA Tomonori, committed by David S. Miller

bnx2x: use the DMA API instead of the pci equivalents

The DMA API is preferred.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Vladislav Zolotarov <vladz@broadcom.com>
Acked-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 1a4ccc2d
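The conversion is mechanical: each pci_* DMA helper becomes its generic dma_* equivalent operating on the embedded struct device (&pdev->dev) rather than the struct pci_dev, and the PCI_DMA_{TO,FROM}DEVICE direction constants become DMA_{TO,FROM}_DEVICE. A minimal sketch of the pattern, with illustrative names (my_map_example is not a bnx2x function):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Map a buffer for device reads and check the result. */
static int my_map_example(struct pci_dev *pdev, void *buf, size_t len,
                          dma_addr_t *out)
{
        /* Old: addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE); */
        dma_addr_t addr = dma_map_single(&pdev->dev, buf, len,
                                         DMA_TO_DEVICE);

        if (dma_mapping_error(&pdev->dev, addr))
                return -ENOMEM;
        *out = addr;
        return 0;
}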
@@ -163,7 +163,7 @@ do { \
 struct sw_rx_bd {
     struct sk_buff *skb;
-    DECLARE_PCI_UNMAP_ADDR(mapping)
+    DEFINE_DMA_UNMAP_ADDR(mapping);
 };

 struct sw_tx_bd {
@@ -176,7 +176,7 @@ struct sw_tx_bd {
 struct sw_rx_page {
     struct page *page;
-    DECLARE_PCI_UNMAP_ADDR(mapping)
+    DEFINE_DMA_UNMAP_ADDR(mapping);
 };

 union db_prod {
......
@@ -842,7 +842,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
     /* unmap first bd */
     DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
     tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
-    pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
-                     BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
+    dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+                     BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

     nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
@@ -872,8 +872,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
         DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
         tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
-        pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
-                       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
+        dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+                       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
         if (--nbd)
             bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
     }
@@ -1086,7 +1086,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
     if (!page)
         return;

-    pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
-                   SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+    dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+                   SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
     __free_pages(page, PAGES_PER_SGE_SHIFT);
@@ -1115,15 +1115,15 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
     if (unlikely(page == NULL))
         return -ENOMEM;

-    mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
-                           PCI_DMA_FROMDEVICE);
+    mapping = dma_map_page(&bp->pdev->dev, page, 0,
+                           SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
     if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
         __free_pages(page, PAGES_PER_SGE_SHIFT);
         return -ENOMEM;
     }

     sw_buf->page = page;
-    pci_unmap_addr_set(sw_buf, mapping, mapping);
+    dma_unmap_addr_set(sw_buf, mapping, mapping);

     sge->addr_hi = cpu_to_le32(U64_HI(mapping));
     sge->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1143,15 +1143,15 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
     if (unlikely(skb == NULL))
         return -ENOMEM;

-    mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
-                             PCI_DMA_FROMDEVICE);
+    mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+                             DMA_FROM_DEVICE);
     if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
         dev_kfree_skb(skb);
         return -ENOMEM;
     }

     rx_buf->skb = skb;
-    pci_unmap_addr_set(rx_buf, mapping, mapping);
+    dma_unmap_addr_set(rx_buf, mapping, mapping);

     rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
     rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
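For context on the helpers in the hunks above: DEFINE_DMA_UNMAP_ADDR() and the dma_unmap_addr()/dma_unmap_addr_set() accessors replace the older DECLARE_PCI_UNMAP_ADDR()/pci_unmap_addr*() family. On architectures that keep no unmap state they expand to nothing, so storing the address costs nothing there. A minimal sketch with illustrative names (my_rx_buf is not the driver's struct):

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

struct my_rx_buf {
        struct sk_buff *skb;
        DEFINE_DMA_UNMAP_ADDR(mapping);   /* may expand to nothing */
};

/* Remember the DMA address at map time... */
static void my_save_mapping(struct my_rx_buf *b, dma_addr_t addr)
{
        dma_unmap_addr_set(b, mapping, addr);
}

/* ...and retrieve it at unmap time. */
static dma_addr_t my_load_mapping(struct my_rx_buf *b)
{
        return dma_unmap_addr(b, mapping);
}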
@@ -1173,13 +1173,13 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
     struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
     struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

-    pci_dma_sync_single_for_device(bp->pdev,
-                                   pci_unmap_addr(cons_rx_buf, mapping),
-                                   RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+    dma_sync_single_for_device(&bp->pdev->dev,
+                               dma_unmap_addr(cons_rx_buf, mapping),
+                               RX_COPY_THRESH, DMA_FROM_DEVICE);

     prod_rx_buf->skb = cons_rx_buf->skb;
-    pci_unmap_addr_set(prod_rx_buf, mapping,
-                       pci_unmap_addr(cons_rx_buf, mapping));
+    dma_unmap_addr_set(prod_rx_buf, mapping,
+                       dma_unmap_addr(cons_rx_buf, mapping));
     *prod_bd = *cons_bd;
@@ -1283,9 +1283,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
     /* move empty skb from pool to prod and map it */
     prod_rx_buf->skb = fp->tpa_pool[queue].skb;
-    mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
-                             bp->rx_buf_size, PCI_DMA_FROMDEVICE);
-    pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
+    mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
+                             bp->rx_buf_size, DMA_FROM_DEVICE);
+    dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

     /* move partial skb from cons to pool (don't unmap yet) */
     fp->tpa_pool[queue] = *cons_rx_buf;
@@ -1361,8 +1361,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
         }

         /* Unmap the page as we r going to pass it to the stack */
-        pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
-                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+        dma_unmap_page(&bp->pdev->dev,
+                       dma_unmap_addr(&old_rx_pg, mapping),
+                       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

         /* Add one frag and update the appropriate fields in the skb */
         skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1389,8 +1390,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
     /* Unmap skb in the pool anyway, as we are going to change
        pool entry status to BNX2X_TPA_STOP even if new skb allocation
        fails. */
-    pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
-                     bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+    dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+                     bp->rx_buf_size, DMA_FROM_DEVICE);

     if (likely(new_skb)) {
         /* fix ip xsum and give it to the stack */
@@ -1620,10 +1621,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                 }
             }

-            pci_dma_sync_single_for_device(bp->pdev,
-                    pci_unmap_addr(rx_buf, mapping),
-                                           pad + RX_COPY_THRESH,
-                                           PCI_DMA_FROMDEVICE);
+            dma_sync_single_for_device(&bp->pdev->dev,
+                    dma_unmap_addr(rx_buf, mapping),
+                                       pad + RX_COPY_THRESH,
+                                       DMA_FROM_DEVICE);
             prefetch(skb);
             prefetch(((char *)(skb)) + 128);
@@ -1665,10 +1666,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
             } else
             if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
-                pci_unmap_single(bp->pdev,
-                    pci_unmap_addr(rx_buf, mapping),
-                                 bp->rx_buf_size,
-                                 PCI_DMA_FROMDEVICE);
+                dma_unmap_single(&bp->pdev->dev,
+                    dma_unmap_addr(rx_buf, mapping),
+                                 bp->rx_buf_size,
+                                 DMA_FROM_DEVICE);
                 skb_reserve(skb, pad);
                 skb_put(skb, len);
@@ -4940,9 +4941,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
         }

         if (fp->tpa_state[i] == BNX2X_TPA_START)
-            pci_unmap_single(bp->pdev,
-                             pci_unmap_addr(rx_buf, mapping),
-                             bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+            dma_unmap_single(&bp->pdev->dev,
+                             dma_unmap_addr(rx_buf, mapping),
+                             bp->rx_buf_size, DMA_FROM_DEVICE);

         dev_kfree_skb(skb);
         rx_buf->skb = NULL;
@@ -4978,7 +4979,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
                     fp->disable_tpa = 1;
                     break;
                 }
-                pci_unmap_addr_set((struct sw_rx_bd *)
+                dma_unmap_addr_set((struct sw_rx_bd *)
                             &bp->fp->tpa_pool[i],
                            mapping, 0);
                 fp->tpa_state[i] = BNX2X_TPA_STOP;
@@ -5658,8 +5659,8 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 static int bnx2x_gunzip_init(struct bnx2x *bp)
 {
-    bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
-                                          &bp->gunzip_mapping);
+    bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+                                        &bp->gunzip_mapping, GFP_KERNEL);
     if (bp->gunzip_buf == NULL)
         goto gunzip_nomem1;
@@ -5679,8 +5680,8 @@ static int bnx2x_gunzip_init(struct bnx2x *bp)
     bp->strm = NULL;

 gunzip_nomem2:
-    pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
-                        bp->gunzip_mapping);
+    dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+                      bp->gunzip_mapping);
     bp->gunzip_buf = NULL;

 gunzip_nomem1:
@@ -5696,8 +5697,8 @@ static void bnx2x_gunzip_end(struct bnx2x *bp)
     bp->strm = NULL;

     if (bp->gunzip_buf) {
-        pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
-                            bp->gunzip_mapping);
+        dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+                          bp->gunzip_mapping);
         bp->gunzip_buf = NULL;
     }
 }
@@ -6692,7 +6693,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
 #define BNX2X_PCI_FREE(x, y, size) \
     do { \
         if (x) { \
-            pci_free_consistent(bp->pdev, size, x, y); \
+            dma_free_coherent(&bp->pdev->dev, size, x, y); \
             x = NULL; \
             y = 0; \
         } \
@@ -6773,7 +6774,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
 #define BNX2X_PCI_ALLOC(x, y, size) \
     do { \
-        x = pci_alloc_consistent(bp->pdev, size, y); \
+        x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
         if (x == NULL) \
             goto alloc_mem_err; \
         memset(x, 0, size); \
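One behavioral nuance in the coherent-memory hunks above: pci_alloc_consistent() hard-codes GFP_ATOMIC, while dma_alloc_coherent() takes the gfp flags explicitly, so these sleepable setup paths can now use GFP_KERNEL. A before/after sketch (my_alloc_ring and my_free_ring are illustrative, not bnx2x code):

#include <linux/dma-mapping.h>

static void *my_alloc_ring(struct device *dev, size_t size,
                           dma_addr_t *mapping)
{
        /* Old: pci_alloc_consistent(pdev, size, mapping); (GFP_ATOMIC) */
        return dma_alloc_coherent(dev, size, mapping, GFP_KERNEL);
}

static void my_free_ring(struct device *dev, size_t size, void *ring,
                         dma_addr_t mapping)
{
        dma_free_coherent(dev, size, ring, mapping);
}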
@@ -6906,9 +6907,9 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
             if (skb == NULL)
                 continue;

-            pci_unmap_single(bp->pdev,
-                             pci_unmap_addr(rx_buf, mapping),
-                             bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+            dma_unmap_single(&bp->pdev->dev,
+                             dma_unmap_addr(rx_buf, mapping),
+                             bp->rx_buf_size, DMA_FROM_DEVICE);

             rx_buf->skb = NULL;
             dev_kfree_skb(skb);
@@ -10269,8 +10270,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
     bd_prod = TX_BD(fp_tx->tx_bd_prod);
     tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;

-    mapping = pci_map_single(bp->pdev, skb->data,
-                             skb_headlen(skb), PCI_DMA_TODEVICE);
+    mapping = dma_map_single(&bp->pdev->dev, skb->data,
+                             skb_headlen(skb), DMA_TO_DEVICE);
     tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
     tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
     tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -11316,8 +11317,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
         }
     }

-    mapping = pci_map_single(bp->pdev, skb->data,
-                             skb_headlen(skb), PCI_DMA_TODEVICE);
+    mapping = dma_map_single(&bp->pdev->dev, skb->data,
+                             skb_headlen(skb), DMA_TO_DEVICE);

     tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
     tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11374,8 +11375,9 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
         if (total_pkt_bd == NULL)
             total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

-        mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
-                               frag->size, PCI_DMA_TODEVICE);
+        mapping = dma_map_page(&bp->pdev->dev, frag->page,
+                               frag->page_offset,
+                               frag->size, DMA_TO_DEVICE);

         tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
         tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11832,15 +11834,15 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
         goto err_out_release;
     }

-    if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+    if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
         bp->flags |= USING_DAC_FLAG;
-        if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
-            pr_err("pci_set_consistent_dma_mask failed, aborting\n");
+        if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
+            pr_err("dma_set_coherent_mask failed, aborting\n");
             rc = -EIO;
             goto err_out_release;
         }
-    } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+    } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
         pr_err("System does not support DMA, aborting\n");
         rc = -EIO;
         goto err_out_release;
......
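The final hunk moves the DMA-mask setup to the generic calls. The shape of that logic, slightly simplified (my_set_dma_masks is an illustrative helper; as in the driver, the fallback branch keeps the default 32-bit coherent mask):

#include <linux/dma-mapping.h>

static int my_set_dma_masks(struct device *dev)
{
        if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
                /* 64-bit streaming DMA works; raise the coherent mask too. */
                if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0)
                        return -EIO;
                return 0;
        }
        /* Fall back to 32-bit addressing. */
        if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0)
                return -EIO;
        return 0;
}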