提交 1f9061d2 编写于 作者: J Joe Perches 提交者: David S. Miller

drivers:net: dma_alloc_coherent: use __GFP_ZERO instead of memset(, 0)

Reduce the number of calls required to alloc
a zeroed block of memory.

Trivially reduces overall object size.

Other changes around these removals
o Neaten call argument alignment
o Remove an unnecessary OOM message after dma_alloc_coherent failure
o Remove unnecessary gfp_t stack variable
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 7f9421c2
...@@ -1466,25 +1466,21 @@ static int greth_of_probe(struct platform_device *ofdev) ...@@ -1466,25 +1466,21 @@ static int greth_of_probe(struct platform_device *ofdev)
/* Allocate TX descriptor ring in coherent memory */ /* Allocate TX descriptor ring in coherent memory */
greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024, greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
&greth->tx_bd_base_phys, &greth->tx_bd_base_phys,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (!greth->tx_bd_base) { if (!greth->tx_bd_base) {
err = -ENOMEM; err = -ENOMEM;
goto error3; goto error3;
} }
memset(greth->tx_bd_base, 0, 1024);
/* Allocate RX descriptor ring in coherent memory */ /* Allocate RX descriptor ring in coherent memory */
greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024, greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
&greth->rx_bd_base_phys, &greth->rx_bd_base_phys,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (!greth->rx_bd_base) { if (!greth->rx_bd_base) {
err = -ENOMEM; err = -ENOMEM;
goto error4; goto error4;
} }
memset(greth->rx_bd_base, 0, 1024);
/* Get MAC address from: module param, OF property or ID prom */ /* Get MAC address from: module param, OF property or ID prom */
for (i = 0; i < 6; i++) { for (i = 0; i < 6; i++) {
if (macaddr[i] != 0) if (macaddr[i] != 0)
......
...@@ -862,25 +862,25 @@ static int bcm_enet_open(struct net_device *dev) ...@@ -862,25 +862,25 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate rx dma ring */ /* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
GFP_KERNEL | __GFP_ZERO);
if (!p) { if (!p) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_freeirq_tx; goto out_freeirq_tx;
} }
memset(p, 0, size);
priv->rx_desc_alloc_size = size; priv->rx_desc_alloc_size = size;
priv->rx_desc_cpu = p; priv->rx_desc_cpu = p;
/* allocate tx dma ring */ /* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
GFP_KERNEL | __GFP_ZERO);
if (!p) { if (!p) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_free_rx_ring; goto out_free_rx_ring;
} }
memset(p, 0, size);
priv->tx_desc_alloc_size = size; priv->tx_desc_alloc_size = size;
priv->tx_desc_cpu = p; priv->tx_desc_cpu = p;
......
...@@ -854,12 +854,11 @@ bnx2_alloc_mem(struct bnx2 *bp) ...@@ -854,12 +854,11 @@ bnx2_alloc_mem(struct bnx2 *bp)
sizeof(struct statistics_block); sizeof(struct statistics_block);
status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
&bp->status_blk_mapping, GFP_KERNEL); &bp->status_blk_mapping,
GFP_KERNEL | __GFP_ZERO);
if (status_blk == NULL) if (status_blk == NULL)
goto alloc_mem_err; goto alloc_mem_err;
memset(status_blk, 0, bp->status_stats_size);
bnapi = &bp->bnx2_napi[0]; bnapi = &bp->bnx2_napi[0];
bnapi->status_blk.msi = status_blk; bnapi->status_blk.msi = status_blk;
bnapi->hw_tx_cons_ptr = bnapi->hw_tx_cons_ptr =
......
...@@ -1946,12 +1946,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, ...@@ -1946,12 +1946,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
bool is_pf); bool is_pf);
#define BNX2X_ILT_ZALLOC(x, y, size) \ #define BNX2X_ILT_ZALLOC(x, y, size) \
do { \ x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ GFP_KERNEL | __GFP_ZERO)
if (x) \
memset(x, 0, size); \
} while (0)
#define BNX2X_ILT_FREE(x, y, size) \ #define BNX2X_ILT_FREE(x, y, size) \
do { \ do { \
......
...@@ -50,13 +50,13 @@ extern int int_mode; ...@@ -50,13 +50,13 @@ extern int int_mode;
} \ } \
} while (0) } while (0)
#define BNX2X_PCI_ALLOC(x, y, size) \ #define BNX2X_PCI_ALLOC(x, y, size) \
do { \ do { \
x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
if (x == NULL) \ GFP_KERNEL | __GFP_ZERO); \
goto alloc_mem_err; \ if (x == NULL) \
memset((void *)x, 0, size); \ goto alloc_mem_err; \
} while (0) } while (0)
#define BNX2X_ALLOC(x, size) \ #define BNX2X_ALLOC(x, size) \
do { \ do { \
......
...@@ -8172,11 +8172,9 @@ static int tg3_mem_rx_acquire(struct tg3 *tp) ...@@ -8172,11 +8172,9 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
TG3_RX_RCB_RING_BYTES(tp), TG3_RX_RCB_RING_BYTES(tp),
&tnapi->rx_rcb_mapping, &tnapi->rx_rcb_mapping,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (!tnapi->rx_rcb) if (!tnapi->rx_rcb)
goto err_out; goto err_out;
memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
} }
return 0; return 0;
...@@ -8226,12 +8224,10 @@ static int tg3_alloc_consistent(struct tg3 *tp) ...@@ -8226,12 +8224,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
sizeof(struct tg3_hw_stats), sizeof(struct tg3_hw_stats),
&tp->stats_mapping, &tp->stats_mapping,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (!tp->hw_stats) if (!tp->hw_stats)
goto err_out; goto err_out;
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
for (i = 0; i < tp->irq_cnt; i++) { for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i]; struct tg3_napi *tnapi = &tp->napi[i];
struct tg3_hw_status *sblk; struct tg3_hw_status *sblk;
...@@ -8239,11 +8235,10 @@ static int tg3_alloc_consistent(struct tg3 *tp) ...@@ -8239,11 +8235,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
TG3_HW_STATUS_SIZE, TG3_HW_STATUS_SIZE,
&tnapi->status_mapping, &tnapi->status_mapping,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (!tnapi->hw_status) if (!tnapi->hw_status)
goto err_out; goto err_out;
memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
sblk = tnapi->hw_status; sblk = tnapi->hw_status;
if (tg3_flag(tp, ENABLE_RSS)) { if (tg3_flag(tp, ENABLE_RSS)) {
......
...@@ -1264,9 +1264,8 @@ bnad_mem_alloc(struct bnad *bnad, ...@@ -1264,9 +1264,8 @@ bnad_mem_alloc(struct bnad *bnad,
mem_info->mdl[i].len = mem_info->len; mem_info->mdl[i].len = mem_info->len;
mem_info->mdl[i].kva = mem_info->mdl[i].kva =
dma_alloc_coherent(&bnad->pcidev->dev, dma_alloc_coherent(&bnad->pcidev->dev,
mem_info->len, &dma_pa, mem_info->len, &dma_pa,
GFP_KERNEL); GFP_KERNEL);
if (mem_info->mdl[i].kva == NULL) if (mem_info->mdl[i].kva == NULL)
goto err_return; goto err_return;
......
...@@ -146,10 +146,9 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, ...@@ -146,10 +146,9 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
q->entry_size = entry_size; q->entry_size = entry_size;
mem->size = len * entry_size; mem->size = len * entry_size;
mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (!mem->va) if (!mem->va)
return -ENOMEM; return -ENOMEM;
memset(mem->va, 0, mem->size);
return 0; return 0;
} }
...@@ -2569,10 +2568,9 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) ...@@ -2569,10 +2568,9 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (cmd.va == NULL) if (cmd.va == NULL)
return -1; return -1;
memset(cmd.va, 0, cmd.size);
if (enable) { if (enable) {
status = pci_write_config_dword(adapter->pdev, status = pci_write_config_dword(adapter->pdev,
...@@ -3794,12 +3792,13 @@ static int be_ctrl_init(struct be_adapter *adapter) ...@@ -3794,12 +3792,13 @@ static int be_ctrl_init(struct be_adapter *adapter)
rx_filter->size = sizeof(struct be_cmd_req_rx_filter); rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size, rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
&rx_filter->dma, GFP_KERNEL); &rx_filter->dma,
GFP_KERNEL | __GFP_ZERO);
if (rx_filter->va == NULL) { if (rx_filter->va == NULL) {
status = -ENOMEM; status = -ENOMEM;
goto free_mbox; goto free_mbox;
} }
memset(rx_filter->va, 0, rx_filter->size);
mutex_init(&adapter->mbox_lock); mutex_init(&adapter->mbox_lock);
spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock); spin_lock_init(&adapter->mcc_cq_lock);
...@@ -3841,10 +3840,9 @@ static int be_stats_init(struct be_adapter *adapter) ...@@ -3841,10 +3840,9 @@ static int be_stats_init(struct be_adapter *adapter)
cmd->size = sizeof(struct be_cmd_req_get_stats_v1); cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (cmd->va == NULL) if (cmd->va == NULL)
return -1; return -1;
memset(cmd->va, 0, cmd->size);
return 0; return 0;
} }
......
...@@ -780,12 +780,11 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv) ...@@ -780,12 +780,11 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
priv->descs = dma_alloc_coherent(priv->dev, priv->descs = dma_alloc_coherent(priv->dev,
sizeof(struct ftgmac100_descs), sizeof(struct ftgmac100_descs),
&priv->descs_dma_addr, GFP_KERNEL); &priv->descs_dma_addr,
GFP_KERNEL | __GFP_ZERO);
if (!priv->descs) if (!priv->descs)
return -ENOMEM; return -ENOMEM;
memset(priv->descs, 0, sizeof(struct ftgmac100_descs));
/* initialize RX ring */ /* initialize RX ring */
ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
......
...@@ -732,13 +732,13 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv) ...@@ -732,13 +732,13 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
{ {
int i; int i;
priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs), priv->descs = dma_alloc_coherent(priv->dev,
&priv->descs_dma_addr, GFP_KERNEL); sizeof(struct ftmac100_descs),
&priv->descs_dma_addr,
GFP_KERNEL | __GFP_ZERO);
if (!priv->descs) if (!priv->descs)
return -ENOMEM; return -ENOMEM;
memset(priv->descs, 0, sizeof(struct ftmac100_descs));
/* initialize RX ring */ /* initialize RX ring */
ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
......
...@@ -638,12 +638,11 @@ static int mal_probe(struct platform_device *ofdev) ...@@ -638,12 +638,11 @@ static int mal_probe(struct platform_device *ofdev)
(NUM_TX_BUFF * mal->num_tx_chans + (NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans); NUM_RX_BUFF * mal->num_rx_chans);
mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (mal->bd_virt == NULL) { if (mal->bd_virt == NULL) {
err = -ENOMEM; err = -ENOMEM;
goto fail_unmap; goto fail_unmap;
} }
memset(mal->bd_virt, 0, bd_size);
for (i = 0; i < mal->num_tx_chans; ++i) for (i = 0; i < mal->num_tx_chans; ++i)
set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma + set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
......
...@@ -1020,12 +1020,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) ...@@ -1020,12 +1020,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->size = txdr->count * sizeof(struct e1000_tx_desc); txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096); txdr->size = ALIGN(txdr->size, 4096);
txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (!txdr->desc) { if (!txdr->desc) {
ret_val = 2; ret_val = 2;
goto err_nomem; goto err_nomem;
} }
memset(txdr->desc, 0, txdr->size);
txdr->next_to_use = txdr->next_to_clean = 0; txdr->next_to_use = txdr->next_to_clean = 0;
ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF));
...@@ -1075,12 +1074,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) ...@@ -1075,12 +1074,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (!rxdr->desc) { if (!rxdr->desc) {
ret_val = 5; ret_val = 5;
goto err_nomem; goto err_nomem;
} }
memset(rxdr->desc, 0, rxdr->size);
rxdr->next_to_use = rxdr->next_to_clean = 0; rxdr->next_to_use = rxdr->next_to_clean = 0;
rctl = er32(RCTL); rctl = er32(RCTL);
......
...@@ -447,7 +447,6 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, ...@@ -447,7 +447,6 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL); &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc) if (!tx_ring->desc)
goto err; goto err;
...@@ -488,7 +487,6 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter, ...@@ -488,7 +487,6 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL); &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) if (!rx_ring->desc)
goto err; goto err;
......
...@@ -717,12 +717,11 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter) ...@@ -717,12 +717,11 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->size = ALIGN(txdr->size, 4096); txdr->size = ALIGN(txdr->size, 4096);
txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (!txdr->desc) { if (!txdr->desc) {
vfree(txdr->buffer_info); vfree(txdr->buffer_info);
return -ENOMEM; return -ENOMEM;
} }
memset(txdr->desc, 0, txdr->size);
txdr->next_to_use = 0; txdr->next_to_use = 0;
txdr->next_to_clean = 0; txdr->next_to_clean = 0;
......
...@@ -584,12 +584,14 @@ static int init_hash_table(struct pxa168_eth_private *pep) ...@@ -584,12 +584,14 @@ static int init_hash_table(struct pxa168_eth_private *pep)
*/ */
if (pep->htpr == NULL) { if (pep->htpr == NULL) {
pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
HASH_ADDR_TABLE_SIZE, HASH_ADDR_TABLE_SIZE,
&pep->htpr_dma, GFP_KERNEL); &pep->htpr_dma,
GFP_KERNEL | __GFP_ZERO);
if (pep->htpr == NULL) if (pep->htpr == NULL)
return -ENOMEM; return -ENOMEM;
} else {
memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
} }
memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
wrl(pep, HTPR, pep->htpr_dma); wrl(pep, HTPR, pep->htpr_dma);
return 0; return 0;
} }
...@@ -1023,11 +1025,11 @@ static int rxq_init(struct net_device *dev) ...@@ -1023,11 +1025,11 @@ static int rxq_init(struct net_device *dev)
size = pep->rx_ring_size * sizeof(struct rx_desc); size = pep->rx_ring_size * sizeof(struct rx_desc);
pep->rx_desc_area_size = size; pep->rx_desc_area_size = size;
pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
&pep->rx_desc_dma, GFP_KERNEL); &pep->rx_desc_dma,
GFP_KERNEL | __GFP_ZERO);
if (!pep->p_rx_desc_area) if (!pep->p_rx_desc_area)
goto out; goto out;
memset((void *)pep->p_rx_desc_area, 0, size);
/* initialize the next_desc_ptr links in the Rx descriptors ring */ /* initialize the next_desc_ptr links in the Rx descriptors ring */
p_rx_desc = pep->p_rx_desc_area; p_rx_desc = pep->p_rx_desc_area;
for (i = 0; i < rx_desc_num; i++) { for (i = 0; i < rx_desc_num; i++) {
...@@ -1084,10 +1086,10 @@ static int txq_init(struct net_device *dev) ...@@ -1084,10 +1086,10 @@ static int txq_init(struct net_device *dev)
size = pep->tx_ring_size * sizeof(struct tx_desc); size = pep->tx_ring_size * sizeof(struct tx_desc);
pep->tx_desc_area_size = size; pep->tx_desc_area_size = size;
pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
&pep->tx_desc_dma, GFP_KERNEL); &pep->tx_desc_dma,
GFP_KERNEL | __GFP_ZERO);
if (!pep->p_tx_desc_area) if (!pep->p_tx_desc_area)
goto out; goto out;
memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
/* Initialize the next_desc_ptr links in the Tx descriptors ring */ /* Initialize the next_desc_ptr links in the Tx descriptors ring */
p_tx_desc = pep->p_tx_desc_area; p_tx_desc = pep->p_tx_desc_area;
for (i = 0; i < tx_desc_num; i++) { for (i = 0; i < tx_desc_num; i++) {
......
...@@ -3592,10 +3592,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp) ...@@ -3592,10 +3592,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
&ss->rx_done.bus, &ss->rx_done.bus,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (ss->rx_done.entry == NULL) if (ss->rx_done.entry == NULL)
goto abort; goto abort;
memset(ss->rx_done.entry, 0, bytes);
bytes = sizeof(*ss->fw_stats); bytes = sizeof(*ss->fw_stats);
ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes, ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
&ss->fw_stats_bus, &ss->fw_stats_bus,
......
...@@ -1470,11 +1470,10 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter, ...@@ -1470,11 +1470,10 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size, rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
&rx_ring->rx_buff_pool_logic, &rx_ring->rx_buff_pool_logic,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (!rx_ring->rx_buff_pool) if (!rx_ring->rx_buff_pool)
return -ENOMEM; return -ENOMEM;
memset(rx_ring->rx_buff_pool, 0, size);
rx_ring->rx_buff_pool_size = size; rx_ring->rx_buff_pool_size = size;
for (i = 0; i < rx_ring->count; i++) { for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i]; buffer_info = &rx_ring->buffer_info[i];
...@@ -1773,12 +1772,12 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, ...@@ -1773,12 +1772,12 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL); &tx_ring->dma,
GFP_KERNEL | __GFP_ZERO);
if (!tx_ring->desc) { if (!tx_ring->desc) {
vfree(tx_ring->buffer_info); vfree(tx_ring->buffer_info);
return -ENOMEM; return -ENOMEM;
} }
memset(tx_ring->desc, 0, tx_ring->size);
tx_ring->next_to_use = 0; tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0; tx_ring->next_to_clean = 0;
...@@ -1818,12 +1817,12 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter, ...@@ -1818,12 +1817,12 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL); &rx_ring->dma,
GFP_KERNEL | __GFP_ZERO);
if (!rx_ring->desc) { if (!rx_ring->desc) {
vfree(rx_ring->buffer_info); vfree(rx_ring->buffer_info);
return -ENOMEM; return -ENOMEM;
} }
memset(rx_ring->desc, 0, rx_ring->size);
rx_ring->next_to_clean = 0; rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0; rx_ring->next_to_use = 0;
for (desNo = 0; desNo < rx_ring->count; desNo++) { for (desNo = 0; desNo < rx_ring->count; desNo++) {
......
...@@ -441,12 +441,11 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev) ...@@ -441,12 +441,11 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
RX_RING_SIZE * sizeof(u64), RX_RING_SIZE * sizeof(u64),
&ring->buf_dma, GFP_KERNEL); &ring->buf_dma,
GFP_KERNEL | __GFP_ZERO);
if (!ring->buffers) if (!ring->buffers)
goto out_ring_desc; goto out_ring_desc;
memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));
write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno), write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma)); PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));
......
...@@ -422,22 +422,20 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, ...@@ -422,22 +422,20 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
&rq_phys_addr, GFP_KERNEL); &rq_phys_addr, GFP_KERNEL | __GFP_ZERO);
if (!rq_addr) if (!rq_addr)
return -ENOMEM; return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
&rsp_phys_addr, GFP_KERNEL); &rsp_phys_addr, GFP_KERNEL | __GFP_ZERO);
if (!rsp_addr) { if (!rsp_addr) {
err = -ENOMEM; err = -ENOMEM;
goto out_free_rq; goto out_free_rq;
} }
memset(rq_addr, 0, rq_size);
prq = rq_addr; prq = rq_addr;
memset(rsp_addr, 0, rsp_size);
prsp = rsp_addr; prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
...@@ -744,10 +742,9 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter, ...@@ -744,10 +742,9 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
size_t nic_size = sizeof(struct qlcnic_info_le); size_t nic_size = sizeof(struct qlcnic_info_le);
nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
&nic_dma_t, GFP_KERNEL); &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
if (!nic_info_addr) if (!nic_info_addr)
return -ENOMEM; return -ENOMEM;
memset(nic_info_addr, 0, nic_size);
nic_info = nic_info_addr; nic_info = nic_info_addr;
...@@ -795,11 +792,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter, ...@@ -795,11 +792,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
return err; return err;
nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
&nic_dma_t, GFP_KERNEL); &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
if (!nic_info_addr) if (!nic_info_addr)
return -ENOMEM; return -ENOMEM;
memset(nic_info_addr, 0, nic_size);
nic_info = nic_info_addr; nic_info = nic_info_addr;
nic_info->pci_func = cpu_to_le16(nic->pci_func); nic_info->pci_func = cpu_to_le16(nic->pci_func);
...@@ -845,10 +841,10 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter, ...@@ -845,10 +841,10 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC; size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
&pci_info_dma_t, GFP_KERNEL); &pci_info_dma_t,
GFP_KERNEL | __GFP_ZERO);
if (!pci_info_addr) if (!pci_info_addr)
return -ENOMEM; return -ENOMEM;
memset(pci_info_addr, 0, pci_size);
npar = pci_info_addr; npar = pci_info_addr;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
...@@ -940,12 +936,10 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, ...@@ -940,12 +936,10 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
} }
stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
&stats_dma_t, GFP_KERNEL); &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
if (!stats_addr) if (!stats_addr)
return -ENOMEM; return -ENOMEM;
memset(stats_addr, 0, stats_size);
arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12; arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
arg1 |= rx_tx << 15 | stats_size << 16; arg1 |= rx_tx << 15 | stats_size << 16;
...@@ -993,11 +987,10 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, ...@@ -993,11 +987,10 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
return -ENOMEM; return -ENOMEM;
stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
&stats_dma_t, GFP_KERNEL); &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
if (!stats_addr) if (!stats_addr)
return -ENOMEM; return -ENOMEM;
memset(stats_addr, 0, stats_size);
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS); qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
cmd.req.arg[1] = stats_size << 16; cmd.req.arg[1] = stats_size << 16;
cmd.req.arg[2] = MSD(stats_dma_t); cmd.req.arg[2] = MSD(stats_dma_t);
......
...@@ -305,11 +305,11 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, ...@@ -305,11 +305,11 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len) unsigned int len)
{ {
buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
&buffer->dma_addr, GFP_ATOMIC); &buffer->dma_addr,
GFP_ATOMIC | __GFP_ZERO);
if (!buffer->addr) if (!buffer->addr)
return -ENOMEM; return -ENOMEM;
buffer->len = len; buffer->len = len;
memset(buffer->addr, 0, len);
return 0; return 0;
} }
......
...@@ -213,10 +213,11 @@ static int meth_init_tx_ring(struct meth_private *priv) ...@@ -213,10 +213,11 @@ static int meth_init_tx_ring(struct meth_private *priv)
{ {
/* Init TX ring */ /* Init TX ring */
priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE, priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
&priv->tx_ring_dma, GFP_ATOMIC); &priv->tx_ring_dma,
GFP_ATOMIC | __GFP_ZERO);
if (!priv->tx_ring) if (!priv->tx_ring)
return -ENOMEM; return -ENOMEM;
memset(priv->tx_ring, 0, TX_RING_BUFFER_SIZE);
priv->tx_count = priv->tx_read = priv->tx_write = 0; priv->tx_count = priv->tx_read = priv->tx_write = 0;
mace->eth.tx_ring_base = priv->tx_ring_dma; mace->eth.tx_ring_base = priv->tx_ring_dma;
/* Now init skb save area */ /* Now init skb save area */
......
...@@ -352,8 +352,7 @@ spider_net_init_chain(struct spider_net_card *card, ...@@ -352,8 +352,7 @@ spider_net_init_chain(struct spider_net_card *card,
alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr); alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size, chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
&chain->dma_addr, GFP_KERNEL); &chain->dma_addr, GFP_KERNEL);
if (!chain->hwring) if (!chain->hwring)
return -ENOMEM; return -ENOMEM;
......
...@@ -1308,21 +1308,16 @@ static int tsi108_open(struct net_device *dev) ...@@ -1308,21 +1308,16 @@ static int tsi108_open(struct net_device *dev)
data->id, dev->irq, dev->name); data->id, dev->irq, dev->name);
} }
data->rxring = dma_alloc_coherent(NULL, rxring_size, data->rxring = dma_alloc_coherent(NULL, rxring_size, &data->rxdma,
&data->rxdma, GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (!data->rxring) { if (!data->rxring)
return -ENOMEM; return -ENOMEM;
} else {
memset(data->rxring, 0, rxring_size);
}
data->txring = dma_alloc_coherent(NULL, txring_size, data->txring = dma_alloc_coherent(NULL, txring_size, &data->txdma,
&data->txdma, GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (!data->txring) { if (!data->txring) {
pci_free_consistent(0, rxring_size, data->rxring, data->rxdma); pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
return -ENOMEM; return -ENOMEM;
} else {
memset(data->txring, 0, txring_size);
} }
for (i = 0; i < TSI108_RXRING_LEN; i++) { for (i = 0; i < TSI108_RXRING_LEN; i++) {
......
...@@ -245,23 +245,21 @@ static int temac_dma_bd_init(struct net_device *ndev) ...@@ -245,23 +245,21 @@ static int temac_dma_bd_init(struct net_device *ndev)
/* returns a virtual address and a physical address. */ /* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM, sizeof(*lp->tx_bd_v) * TX_BD_NUM,
&lp->tx_bd_p, GFP_KERNEL); &lp->tx_bd_p, GFP_KERNEL | __GFP_ZERO);
if (!lp->tx_bd_v) if (!lp->tx_bd_v)
goto out; goto out;
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * RX_BD_NUM, sizeof(*lp->rx_bd_v) * RX_BD_NUM,
&lp->rx_bd_p, GFP_KERNEL); &lp->rx_bd_p, GFP_KERNEL | __GFP_ZERO);
if (!lp->rx_bd_v) if (!lp->rx_bd_v)
goto out; goto out;
memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
for (i = 0; i < TX_BD_NUM; i++) { for (i = 0; i < TX_BD_NUM; i++) {
lp->tx_bd_v[i].next = lp->tx_bd_p + lp->tx_bd_v[i].next = lp->tx_bd_p +
sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM); sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
} }
memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
for (i = 0; i < RX_BD_NUM; i++) { for (i = 0; i < RX_BD_NUM; i++) {
lp->rx_bd_v[i].next = lp->rx_bd_p + lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM); sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
......
...@@ -204,25 +204,23 @@ static int axienet_dma_bd_init(struct net_device *ndev) ...@@ -204,25 +204,23 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM, sizeof(*lp->tx_bd_v) * TX_BD_NUM,
&lp->tx_bd_p, &lp->tx_bd_p,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (!lp->tx_bd_v) if (!lp->tx_bd_v)
goto out; goto out;
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * RX_BD_NUM, sizeof(*lp->rx_bd_v) * RX_BD_NUM,
&lp->rx_bd_p, &lp->rx_bd_p,
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (!lp->rx_bd_v) if (!lp->rx_bd_v)
goto out; goto out;
memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
for (i = 0; i < TX_BD_NUM; i++) { for (i = 0; i < TX_BD_NUM; i++) {
lp->tx_bd_v[i].next = lp->tx_bd_p + lp->tx_bd_v[i].next = lp->tx_bd_p +
sizeof(*lp->tx_bd_v) * sizeof(*lp->tx_bd_v) *
((i + 1) % TX_BD_NUM); ((i + 1) % TX_BD_NUM);
} }
memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
for (i = 0; i < RX_BD_NUM; i++) { for (i = 0; i < RX_BD_NUM; i++) {
lp->rx_bd_v[i].next = lp->rx_bd_p + lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) * sizeof(*lp->rx_bd_v) *
......
...@@ -1070,11 +1070,10 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name, ...@@ -1070,11 +1070,10 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
(PI_ALIGN_K_DESC_BLK - 1); (PI_ALIGN_K_DESC_BLK - 1);
bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size, bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
&bp->kmalloced_dma, &bp->kmalloced_dma,
GFP_ATOMIC); GFP_ATOMIC | __GFP_ZERO);
if (top_v == NULL) if (top_v == NULL)
return DFX_K_FAILURE; return DFX_K_FAILURE;
memset(top_v, 0, alloc_size); /* zero out memory before continuing */
top_p = bp->kmalloced_dma; /* get physical address of buffer */ top_p = bp->kmalloced_dma; /* get physical address of buffer */
/* /*
......
...@@ -352,21 +352,19 @@ static int ali_ircc_open(int i, chipio_t *info) ...@@ -352,21 +352,19 @@ static int ali_ircc_open(int i, chipio_t *info)
/* Allocate memory if needed */ /* Allocate memory if needed */
self->rx_buff.head = self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize, dma_alloc_coherent(NULL, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL); &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->rx_buff.head == NULL) { if (self->rx_buff.head == NULL) {
err = -ENOMEM; err = -ENOMEM;
goto err_out2; goto err_out2;
} }
memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head = self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize, dma_alloc_coherent(NULL, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL); &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->tx_buff.head == NULL) { if (self->tx_buff.head == NULL) {
err = -ENOMEM; err = -ENOMEM;
goto err_out3; goto err_out3;
} }
memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE; self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME; self->rx_buff.state = OUTSIDE_FRAME;
......
...@@ -431,22 +431,20 @@ static int __init nsc_ircc_open(chipio_t *info) ...@@ -431,22 +431,20 @@ static int __init nsc_ircc_open(chipio_t *info)
/* Allocate memory if needed */ /* Allocate memory if needed */
self->rx_buff.head = self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize, dma_alloc_coherent(NULL, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL); &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->rx_buff.head == NULL) { if (self->rx_buff.head == NULL) {
err = -ENOMEM; err = -ENOMEM;
goto out2; goto out2;
} }
memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head = self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize, dma_alloc_coherent(NULL, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL); &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->tx_buff.head == NULL) { if (self->tx_buff.head == NULL) {
err = -ENOMEM; err = -ENOMEM;
goto out3; goto out3;
} }
memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE; self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME; self->rx_buff.state = OUTSIDE_FRAME;
......
...@@ -700,12 +700,12 @@ static int pxa_irda_start(struct net_device *dev) ...@@ -700,12 +700,12 @@ static int pxa_irda_start(struct net_device *dev)
err = -ENOMEM; err = -ENOMEM;
si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
&si->dma_rx_buff_phy, GFP_KERNEL ); &si->dma_rx_buff_phy, GFP_KERNEL);
if (!si->dma_rx_buff) if (!si->dma_rx_buff)
goto err_dma_rx_buff; goto err_dma_rx_buff;
si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
&si->dma_tx_buff_phy, GFP_KERNEL ); &si->dma_tx_buff_phy, GFP_KERNEL);
if (!si->dma_tx_buff) if (!si->dma_tx_buff)
goto err_dma_tx_buff; goto err_dma_tx_buff;
......
...@@ -563,19 +563,16 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, ...@@ -563,19 +563,16 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
self->rx_buff.head = self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize, dma_alloc_coherent(NULL, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL); &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->rx_buff.head == NULL) if (self->rx_buff.head == NULL)
goto err_out2; goto err_out2;
self->tx_buff.head = self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize, dma_alloc_coherent(NULL, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL); &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->tx_buff.head == NULL) if (self->tx_buff.head == NULL)
goto err_out3; goto err_out3;
memset(self->rx_buff.head, 0, self->rx_buff.truesize);
memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE; self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME; self->rx_buff.state = OUTSIDE_FRAME;
self->tx_buff.data = self->tx_buff.head; self->tx_buff.data = self->tx_buff.head;
......
...@@ -364,21 +364,19 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id) ...@@ -364,21 +364,19 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
/* Allocate memory if needed */ /* Allocate memory if needed */
self->rx_buff.head = self->rx_buff.head =
dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize, dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL); &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->rx_buff.head == NULL) { if (self->rx_buff.head == NULL) {
err = -ENOMEM; err = -ENOMEM;
goto err_out2; goto err_out2;
} }
memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head = self->tx_buff.head =
dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize, dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL); &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->tx_buff.head == NULL) { if (self->tx_buff.head == NULL) {
err = -ENOMEM; err = -ENOMEM;
goto err_out3; goto err_out3;
} }
memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE; self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME; self->rx_buff.state = OUTSIDE_FRAME;
......
...@@ -216,22 +216,19 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq, ...@@ -216,22 +216,19 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
/* Allocate memory if needed */ /* Allocate memory if needed */
self->rx_buff.head = self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize, dma_alloc_coherent(NULL, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL); &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->rx_buff.head == NULL) { if (self->rx_buff.head == NULL) {
err = -ENOMEM; err = -ENOMEM;
goto err_out1; goto err_out1;
} }
memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head = self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize, dma_alloc_coherent(NULL, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL); &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->tx_buff.head == NULL) { if (self->tx_buff.head == NULL) {
err = -ENOMEM; err = -ENOMEM;
goto err_out2; goto err_out2;
} }
memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE; self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME; self->rx_buff.state = OUTSIDE_FRAME;
......
...@@ -419,8 +419,6 @@ static inline ...@@ -419,8 +419,6 @@ static inline
static int alloc_ringmemory(struct b43_dmaring *ring) static int alloc_ringmemory(struct b43_dmaring *ring)
{ {
gfp_t flags = GFP_KERNEL;
/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
* alignment and 8K buffers for 64-bit DMA with 8K alignment. * alignment and 8K buffers for 64-bit DMA with 8K alignment.
* In practice we could use smaller buffers for the latter, but the * In practice we could use smaller buffers for the latter, but the
...@@ -435,12 +433,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring) ...@@ -435,12 +433,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
ring_mem_size, &(ring->dmabase), ring_mem_size, &(ring->dmabase),
flags); GFP_KERNEL | __GFP_ZERO);
if (!ring->descbase) { if (!ring->descbase)
b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
return -ENOMEM; return -ENOMEM;
}
memset(ring->descbase, 0, ring_mem_size);
return 0; return 0;
} }
......
...@@ -334,10 +334,9 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring) ...@@ -334,10 +334,9 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring)
ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
B43legacy_DMA_RINGMEMSIZE, B43legacy_DMA_RINGMEMSIZE,
&(ring->dmabase), &(ring->dmabase),
GFP_KERNEL); GFP_KERNEL | __GFP_ZERO);
if (!ring->descbase) if (!ring->descbase)
return -ENOMEM; return -ENOMEM;
memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE);
return 0; return 0;
} }
......
...@@ -1921,8 +1921,8 @@ il4965_tx_skb(struct il_priv *il, ...@@ -1921,8 +1921,8 @@ il4965_tx_skb(struct il_priv *il,
static inline int static inline int
il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size) il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
{ {
ptr->addr = ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL); GFP_KERNEL);
if (!ptr->addr) if (!ptr->addr)
return -ENOMEM; return -ENOMEM;
ptr->size = size; ptr->size = size;
......
...@@ -2566,15 +2566,13 @@ il_rx_queue_alloc(struct il_priv *il) ...@@ -2566,15 +2566,13 @@ il_rx_queue_alloc(struct il_priv *il)
INIT_LIST_HEAD(&rxq->rx_used); INIT_LIST_HEAD(&rxq->rx_used);
/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
rxq->bd = rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, GFP_KERNEL);
GFP_KERNEL);
if (!rxq->bd) if (!rxq->bd)
goto err_bd; goto err_bd;
rxq->rb_stts = rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
dma_alloc_coherent(dev, sizeof(struct il_rb_status), &rxq->rb_stts_dma, GFP_KERNEL);
&rxq->rb_stts_dma, GFP_KERNEL);
if (!rxq->rb_stts) if (!rxq->rb_stts)
goto err_rb; goto err_rb;
......
...@@ -2235,9 +2235,8 @@ il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc) ...@@ -2235,9 +2235,8 @@ il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
return -EINVAL; return -EINVAL;
} }
desc->v_addr = desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr, &desc->p_addr, GFP_KERNEL);
GFP_KERNEL);
return (desc->v_addr != NULL) ? 0 : -ENOMEM; return (desc->v_addr != NULL) ? 0 : -ENOMEM;
} }
......
...@@ -124,12 +124,10 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, ...@@ -124,12 +124,10 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
*/ */
addr = dma_alloc_coherent(rt2x00dev->dev, addr = dma_alloc_coherent(rt2x00dev->dev,
queue->limit * queue->desc_size, queue->limit * queue->desc_size,
&dma, GFP_KERNEL); &dma, GFP_KERNEL | __GFP_ZERO);
if (!addr) if (!addr)
return -ENOMEM; return -ENOMEM;
memset(addr, 0, queue->limit * queue->desc_size);
/* /*
* Initialize all queue entries to contain valid addresses. * Initialize all queue entries to contain valid addresses.
*/ */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册