Commit da5ec7f2 authored by Ong Boon Leong, committed by David S. Miller

net: stmmac: refactor stmmac_init_rx_buffers for stmmac_reinit_rx_buffers

The per-queue RX buffer allocation in stmmac_reinit_rx_buffers() can be
made to use stmmac_alloc_rx_buffers() by merging the page_pool alloc
checks for "buf->page" and "buf->sec_page" in stmmac_init_rx_buffers().

This is in preparation for XSK pool allocation later.
Signed-off-by: Ong Boon Leong <boon.leong.ong@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 80f573c9
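The standalone sketch below is an editor's illustration of the pattern described in the commit message, not code from the patch: struct rx_buffer, init_rx_buffer() and reinit_rx_buffers() are hypothetical stand-ins for stmmac_rx_buffer, stmmac_init_rx_buffers() and stmmac_reinit_rx_buffers(). It only models the idea that, once the init helper skips buffers that already exist, a re-init path can call the same allocator again instead of open-coding its own allocation loop.

/*
 * Editor's sketch, not driver code: every identifier here is a made-up
 * stand-in used only to model the "allocate only what is missing" pattern.
 */
#include <stdio.h>
#include <stdlib.h>

struct rx_buffer {
	void *page;     /* primary buffer; may already exist on re-init */
	void *sec_page; /* secondary buffer used when split header is enabled */
};

/*
 * Models stmmac_init_rx_buffers() after the patch: allocation is skipped
 * for buffers that are already present, so the same helper works for both
 * the first init and a later re-init.
 */
static int init_rx_buffer(struct rx_buffer *buf, int split_header)
{
	if (!buf->page) {
		buf->page = malloc(4096);
		if (!buf->page)
			return -1;
	}

	if (split_header && !buf->sec_page) {
		buf->sec_page = malloc(4096);
		if (!buf->sec_page)
			return -1;
	}

	return 0;
}

/*
 * Models the slimmed-down stmmac_reinit_rx_buffers(): it no longer
 * duplicates the allocation logic and simply re-runs the per-buffer helper.
 */
static int reinit_rx_buffers(struct rx_buffer *ring, int ring_size, int split_header)
{
	for (int i = 0; i < ring_size; i++) {
		if (init_rx_buffer(&ring[i], split_header) < 0)
			return -1;
	}
	return 0;
}

int main(void)
{
	static struct rx_buffer ring[4]; /* zero-initialized: nothing allocated yet */

	if (reinit_rx_buffers(ring, 4, 1) < 0)
		return 1;

	printf("all ring entries have their buffers\n");

	for (int i = 0; i < 4; i++) {
		free(ring[i].page);
		free(ring[i].sec_page);
	}
	return 0;
}

This is why, in the hunks below, the open-coded re-allocation loop in stmmac_reinit_rx_buffers() can be replaced with a single per-queue call to stmmac_alloc_rx_buffers().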
@@ -1388,12 +1388,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
 
-	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
-	if (!buf->page)
-		return -ENOMEM;
-	buf->page_offset = stmmac_rx_offset(priv);
+	if (!buf->page) {
+		buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+		if (!buf->page)
+			return -ENOMEM;
+		buf->page_offset = stmmac_rx_offset(priv);
+	}
 
-	if (priv->sph) {
+	if (priv->sph && !buf->sec_page) {
 		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
 		if (!buf->sec_page)
 			return -ENOMEM;
@@ -1547,48 +1549,16 @@ static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
 {
 	u32 rx_count = priv->plat->rx_queues_to_use;
 	u32 queue;
-	int i;
 
 	for (queue = 0; queue < rx_count; queue++)
 		dma_recycle_rx_skbufs(priv, queue);
 
 	for (queue = 0; queue < rx_count; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
-		for (i = 0; i < priv->dma_rx_size; i++) {
-			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-			struct dma_desc *p;
-
-			if (priv->extend_desc)
-				p = &((rx_q->dma_erx + i)->basic);
-			else
-				p = rx_q->dma_rx + i;
-
-			if (!buf->page) {
-				buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
-				if (!buf->page)
-					goto err_reinit_rx_buffers;
-
-				buf->addr = page_pool_get_dma_addr(buf->page) +
-					    buf->page_offset;
-			}
-
-			if (priv->sph && !buf->sec_page) {
-				buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
-				if (!buf->sec_page)
-					goto err_reinit_rx_buffers;
-
-				buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
-			}
-
-			stmmac_set_desc_addr(priv, p, buf->addr);
-			if (priv->sph)
-				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
-			else
-				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
-			if (priv->dma_buf_sz == BUF_SIZE_16KiB)
-				stmmac_init_desc3(priv, p);
-		}
+		int ret;
+
+		ret = stmmac_alloc_rx_buffers(priv, queue, GFP_KERNEL);
+		if (ret < 0)
+			goto err_reinit_rx_buffers;
 	}
 
 	return;
@@ -1791,153 +1761,173 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
 }
 
 /**
- * free_dma_rx_desc_resources - free RX dma desc resources
+ * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
  * @priv: private structure
+ * @queue: RX queue index
  */
-static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
 {
-	u32 rx_count = priv->plat->rx_queues_to_use;
-	u32 queue;
+	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 
-	/* Free RX queue resources */
-	for (queue = 0; queue < rx_count; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	/* Release the DMA RX socket buffers */
+	dma_free_rx_skbufs(priv, queue);
 
-		/* Release the DMA RX socket buffers */
-		dma_free_rx_skbufs(priv, queue);
+	/* Free DMA regions of consistent memory previously allocated */
+	if (!priv->extend_desc)
+		dma_free_coherent(priv->device, priv->dma_rx_size *
+				  sizeof(struct dma_desc),
+				  rx_q->dma_rx, rx_q->dma_rx_phy);
+	else
+		dma_free_coherent(priv->device, priv->dma_rx_size *
+				  sizeof(struct dma_extended_desc),
+				  rx_q->dma_erx, rx_q->dma_rx_phy);
 
-		/* Free DMA regions of consistent memory previously allocated */
-		if (!priv->extend_desc)
-			dma_free_coherent(priv->device, priv->dma_rx_size *
-					  sizeof(struct dma_desc),
-					  rx_q->dma_rx, rx_q->dma_rx_phy);
-		else
-			dma_free_coherent(priv->device, priv->dma_rx_size *
-					  sizeof(struct dma_extended_desc),
-					  rx_q->dma_erx, rx_q->dma_rx_phy);
+	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
+		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
 
-		if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
-			xdp_rxq_info_unreg(&rx_q->xdp_rxq);
+	kfree(rx_q->buf_pool);
+	if (rx_q->page_pool)
+		page_pool_destroy(rx_q->page_pool);
+}
 
-		kfree(rx_q->buf_pool);
-		if (rx_q->page_pool)
-			page_pool_destroy(rx_q->page_pool);
-	}
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+
+	/* Free RX queue resources */
+	for (queue = 0; queue < rx_count; queue++)
+		__free_dma_rx_desc_resources(priv, queue);
 }
 
 /**
- * free_dma_tx_desc_resources - free TX dma desc resources
+ * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
  * @priv: private structure
+ * @queue: TX queue index
  */
-static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
 {
-	u32 tx_count = priv->plat->tx_queues_to_use;
-	u32 queue;
+	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+	size_t size;
+	void *addr;
 
-	/* Free TX queue resources */
-	for (queue = 0; queue < tx_count; queue++) {
-		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
-		size_t size;
-		void *addr;
+	/* Release the DMA TX socket buffers */
+	dma_free_tx_skbufs(priv, queue);
+
+	if (priv->extend_desc) {
+		size = sizeof(struct dma_extended_desc);
+		addr = tx_q->dma_etx;
+	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+		size = sizeof(struct dma_edesc);
+		addr = tx_q->dma_entx;
+	} else {
+		size = sizeof(struct dma_desc);
+		addr = tx_q->dma_tx;
+	}
 
-		/* Release the DMA TX socket buffers */
-		dma_free_tx_skbufs(priv, queue);
+	size *= priv->dma_tx_size;
 
-		if (priv->extend_desc) {
-			size = sizeof(struct dma_extended_desc);
-			addr = tx_q->dma_etx;
-		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
-			size = sizeof(struct dma_edesc);
-			addr = tx_q->dma_entx;
-		} else {
-			size = sizeof(struct dma_desc);
-			addr = tx_q->dma_tx;
-		}
+	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
+
+	kfree(tx_q->tx_skbuff_dma);
+	kfree(tx_q->tx_skbuff);
+}
 
-		size *= priv->dma_tx_size;
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 tx_count = priv->plat->tx_queues_to_use;
+	u32 queue;
 
-		dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
-
-		kfree(tx_q->tx_skbuff_dma);
-		kfree(tx_q->tx_skbuff);
-	}
+	/* Free TX queue resources */
+	for (queue = 0; queue < tx_count; queue++)
+		__free_dma_tx_desc_resources(priv, queue);
 }
 
 /**
- * alloc_dma_rx_desc_resources - alloc RX resources.
+ * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
  * @priv: private structure
+ * @queue: RX queue index
  * Description: according to which descriptor can be used (extend or basic)
  * this function allocates the resources for TX and RX paths. In case of
  * reception, for example, it pre-allocated the RX socket buffer in order to
  * allow zero-copy mechanism.
  */
-static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
 {
+	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	struct stmmac_channel *ch = &priv->channel[queue];
 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
-	u32 rx_count = priv->plat->rx_queues_to_use;
-	int ret = -ENOMEM;
-	u32 queue;
+	struct page_pool_params pp_params = { 0 };
+	unsigned int num_pages;
+	int ret;
 
-	/* RX queues buffers and DMA */
-	for (queue = 0; queue < rx_count; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-		struct stmmac_channel *ch = &priv->channel[queue];
-		struct page_pool_params pp_params = { 0 };
-		unsigned int num_pages;
-		int ret;
+	rx_q->queue_index = queue;
+	rx_q->priv_data = priv;
+
+	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+	pp_params.pool_size = priv->dma_rx_size;
+	num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+	pp_params.order = ilog2(num_pages);
+	pp_params.nid = dev_to_node(priv->device);
+	pp_params.dev = priv->device;
+	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+	pp_params.offset = stmmac_rx_offset(priv);
+	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
+
+	rx_q->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(rx_q->page_pool)) {
+		ret = PTR_ERR(rx_q->page_pool);
+		rx_q->page_pool = NULL;
+		return ret;
+	}
 
-		rx_q->queue_index = queue;
-		rx_q->priv_data = priv;
-
-		pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-		pp_params.pool_size = priv->dma_rx_size;
-		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
-		pp_params.order = ilog2(num_pages);
-		pp_params.nid = dev_to_node(priv->device);
-		pp_params.dev = priv->device;
-		pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
-		pp_params.offset = stmmac_rx_offset(priv);
-		pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
-
-		rx_q->page_pool = page_pool_create(&pp_params);
-		if (IS_ERR(rx_q->page_pool)) {
-			ret = PTR_ERR(rx_q->page_pool);
-			rx_q->page_pool = NULL;
-			goto err_dma;
-		}
+	rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+				 sizeof(*rx_q->buf_pool),
+				 GFP_KERNEL);
+	if (!rx_q->buf_pool)
+		return -ENOMEM;
 
-		rx_q->buf_pool = kcalloc(priv->dma_rx_size,
-					 sizeof(*rx_q->buf_pool),
-					 GFP_KERNEL);
-		if (!rx_q->buf_pool)
-			goto err_dma;
+	if (priv->extend_desc) {
+		rx_q->dma_erx = dma_alloc_coherent(priv->device,
+						   priv->dma_rx_size *
+						   sizeof(struct dma_extended_desc),
+						   &rx_q->dma_rx_phy,
+						   GFP_KERNEL);
+		if (!rx_q->dma_erx)
+			return -ENOMEM;
 
-		if (priv->extend_desc) {
-			rx_q->dma_erx = dma_alloc_coherent(priv->device,
-							   priv->dma_rx_size *
-							   sizeof(struct dma_extended_desc),
-							   &rx_q->dma_rx_phy,
-							   GFP_KERNEL);
-			if (!rx_q->dma_erx)
-				goto err_dma;
+	} else {
+		rx_q->dma_rx = dma_alloc_coherent(priv->device,
+						  priv->dma_rx_size *
+						  sizeof(struct dma_desc),
+						  &rx_q->dma_rx_phy,
+						  GFP_KERNEL);
+		if (!rx_q->dma_rx)
+			return -ENOMEM;
+	}
 
-		} else {
-			rx_q->dma_rx = dma_alloc_coherent(priv->device,
-							  priv->dma_rx_size *
-							  sizeof(struct dma_desc),
-							  &rx_q->dma_rx_phy,
-							  GFP_KERNEL);
-			if (!rx_q->dma_rx)
-				goto err_dma;
-		}
+	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
+			       rx_q->queue_index,
+			       ch->rx_napi.napi_id);
+	if (ret) {
+		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+	int ret;
 
-		ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
-				       rx_q->queue_index,
-				       ch->rx_napi.napi_id);
-		if (ret) {
-			netdev_err(priv->dev, "Failed to register xdp rxq info\n");
+	/* RX queues buffers and DMA */
+	for (queue = 0; queue < rx_count; queue++) {
+		ret = __alloc_dma_rx_desc_resources(priv, queue);
+		if (ret)
 			goto err_dma;
-		}
 	}
 
 	return 0;
@@ -1949,60 +1939,70 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 }
 
 /**
- * alloc_dma_tx_desc_resources - alloc TX resources.
+ * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
  * @priv: private structure
+ * @queue: TX queue index
  * Description: according to which descriptor can be used (extend or basic)
  * this function allocates the resources for TX and RX paths. In case of
  * reception, for example, it pre-allocated the RX socket buffer in order to
  * allow zero-copy mechanism.
  */
-static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
 {
-	u32 tx_count = priv->plat->tx_queues_to_use;
-	int ret = -ENOMEM;
-	u32 queue;
+	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+	size_t size;
+	void *addr;
 
-	/* TX queues buffers and DMA */
-	for (queue = 0; queue < tx_count; queue++) {
-		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
-		size_t size;
-		void *addr;
+	tx_q->queue_index = queue;
+	tx_q->priv_data = priv;
 
-		tx_q->queue_index = queue;
-		tx_q->priv_data = priv;
+	tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
+				      sizeof(*tx_q->tx_skbuff_dma),
+				      GFP_KERNEL);
+	if (!tx_q->tx_skbuff_dma)
+		return -ENOMEM;
 
-		tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
-					      sizeof(*tx_q->tx_skbuff_dma),
-					      GFP_KERNEL);
-		if (!tx_q->tx_skbuff_dma)
-			goto err_dma;
+	tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
+				  sizeof(struct sk_buff *),
+				  GFP_KERNEL);
+	if (!tx_q->tx_skbuff)
+		return -ENOMEM;
 
-		tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
-					  sizeof(struct sk_buff *),
-					  GFP_KERNEL);
-		if (!tx_q->tx_skbuff)
-			goto err_dma;
+	if (priv->extend_desc)
+		size = sizeof(struct dma_extended_desc);
+	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+		size = sizeof(struct dma_edesc);
+	else
+		size = sizeof(struct dma_desc);
 
-		if (priv->extend_desc)
-			size = sizeof(struct dma_extended_desc);
-		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
-			size = sizeof(struct dma_edesc);
-		else
-			size = sizeof(struct dma_desc);
+	size *= priv->dma_tx_size;
 
-		size *= priv->dma_tx_size;
+	addr = dma_alloc_coherent(priv->device, size,
+				  &tx_q->dma_tx_phy, GFP_KERNEL);
+	if (!addr)
+		return -ENOMEM;
 
-		addr = dma_alloc_coherent(priv->device, size,
-					  &tx_q->dma_tx_phy, GFP_KERNEL);
-		if (!addr)
-			goto err_dma;
+	if (priv->extend_desc)
+		tx_q->dma_etx = addr;
+	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+		tx_q->dma_entx = addr;
+	else
+		tx_q->dma_tx = addr;
 
-		if (priv->extend_desc)
-			tx_q->dma_etx = addr;
-		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
-			tx_q->dma_entx = addr;
-		else
-			tx_q->dma_tx = addr;
+	return 0;
+}
+
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 tx_count = priv->plat->tx_queues_to_use;
+	u32 queue;
+	int ret;
+
+	/* TX queues buffers and DMA */
+	for (queue = 0; queue < tx_count; queue++) {
+		ret = __alloc_dma_tx_desc_resources(priv, queue);
+		if (ret)
+			goto err_dma;
 	}
 
 	return 0;