提交 efa6a7d0 编写于 作者: I Ioana Ciornei 提交者: David S. Miller

dpaa2-eth: properly handle buffer size restrictions

Depending on the WRIOP version, the buffer size on the RX path must be a
multiple of 64 or 256. Handle this restriction properly by aligning down
the buffer size to the necessary value. Also, use the new buffer size
dynamically computed instead of the compile time one.

Fixes: 27c87486 ("dpaa2-eth: Use a single page per Rx buffer")
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 207b584d
...@@ -86,7 +86,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv, ...@@ -86,7 +86,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
addr = dpaa2_sg_get_addr(&sgt[i]); addr = dpaa2_sg_get_addr(&sgt[i]);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
free_pages((unsigned long)sg_vaddr, 0); free_pages((unsigned long)sg_vaddr, 0);
...@@ -144,7 +144,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, ...@@ -144,7 +144,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
/* Get the address and length from the S/G entry */ /* Get the address and length from the S/G entry */
sg_addr = dpaa2_sg_get_addr(sge); sg_addr = dpaa2_sg_get_addr(sge);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
sg_length = dpaa2_sg_get_len(sge); sg_length = dpaa2_sg_get_len(sge);
...@@ -185,7 +185,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, ...@@ -185,7 +185,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
(page_address(page) - page_address(head_page)); (page_address(page) - page_address(head_page));
skb_add_rx_frag(skb, i - 1, head_page, page_offset, skb_add_rx_frag(skb, i - 1, head_page, page_offset,
sg_length, DPAA2_ETH_RX_BUF_SIZE); sg_length, priv->rx_buf_size);
} }
if (dpaa2_sg_is_final(sge)) if (dpaa2_sg_is_final(sge))
...@@ -211,7 +211,7 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) ...@@ -211,7 +211,7 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
free_pages((unsigned long)vaddr, 0); free_pages((unsigned long)vaddr, 0);
} }
...@@ -335,7 +335,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv, ...@@ -335,7 +335,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
break; break;
case XDP_REDIRECT: case XDP_REDIRECT:
dma_unmap_page(priv->net_dev->dev.parent, addr, dma_unmap_page(priv->net_dev->dev.parent, addr,
DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL); priv->rx_buf_size, DMA_BIDIRECTIONAL);
ch->buf_count--; ch->buf_count--;
xdp.data_hard_start = vaddr; xdp.data_hard_start = vaddr;
err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog); err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
...@@ -374,7 +374,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, ...@@ -374,7 +374,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
trace_dpaa2_rx_fd(priv->net_dev, fd); trace_dpaa2_rx_fd(priv->net_dev, fd);
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE, dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
fas = dpaa2_get_fas(vaddr, false); fas = dpaa2_get_fas(vaddr, false);
...@@ -393,13 +393,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, ...@@ -393,13 +393,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
return; return;
} }
dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
skb = build_linear_skb(ch, fd, vaddr); skb = build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) { } else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog); WARN_ON(priv->xdp_prog);
dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
skb = build_frag_skb(priv, ch, buf_data); skb = build_frag_skb(priv, ch, buf_data);
free_pages((unsigned long)vaddr, 0); free_pages((unsigned long)vaddr, 0);
...@@ -974,7 +974,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, ...@@ -974,7 +974,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
if (!page) if (!page)
goto err_alloc; goto err_alloc;
addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE, addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, addr))) if (unlikely(dma_mapping_error(dev, addr)))
goto err_map; goto err_map;
...@@ -984,7 +984,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, ...@@ -984,7 +984,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
/* tracing point */ /* tracing point */
trace_dpaa2_eth_buf_seed(priv->net_dev, trace_dpaa2_eth_buf_seed(priv->net_dev,
page, DPAA2_ETH_RX_BUF_RAW_SIZE, page, DPAA2_ETH_RX_BUF_RAW_SIZE,
addr, DPAA2_ETH_RX_BUF_SIZE, addr, priv->rx_buf_size,
bpid); bpid);
} }
...@@ -1720,7 +1720,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu) ...@@ -1720,7 +1720,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
int mfl, linear_mfl; int mfl, linear_mfl;
mfl = DPAA2_ETH_L2_MAX_FRM(mtu); mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE - linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM; dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
if (mfl > linear_mfl) { if (mfl > linear_mfl) {
...@@ -2462,6 +2462,11 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) ...@@ -2462,6 +2462,11 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
else else
rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
/* We need to ensure that the buffer size seen by WRIOP is a multiple
* of 64 or 256 bytes depending on the WRIOP version.
*/
priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
/* tx buffer */ /* tx buffer */
buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
buf_layout.pass_timestamp = true; buf_layout.pass_timestamp = true;
...@@ -3126,7 +3131,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv) ...@@ -3126,7 +3131,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
pools_params.num_dpbp = 1; pools_params.num_dpbp = 1;
pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
pools_params.pools[0].backup_pool = 0; pools_params.pools[0].backup_pool = 0;
pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; pools_params.pools[0].buffer_size = priv->rx_buf_size;
err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
if (err) { if (err) {
dev_err(dev, "dpni_set_pools() failed\n"); dev_err(dev, "dpni_set_pools() failed\n");
......
...@@ -382,6 +382,7 @@ struct dpaa2_eth_priv { ...@@ -382,6 +382,7 @@ struct dpaa2_eth_priv {
u16 tx_data_offset; u16 tx_data_offset;
struct fsl_mc_device *dpbp_dev; struct fsl_mc_device *dpbp_dev;
u16 rx_buf_size;
u16 bpid; u16 bpid;
struct iommu_domain *iommu_domain; struct iommu_domain *iommu_domain;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册