提交 fc11fbf9 编写于 作者: Saeed Mahameed 提交者: David S. Miller

net/mlx5e: Add HW cacheline start padding

Enable HW cacheline start padding and align RX WQE size to cacheline
while considering HW start padding. Also, fix dma_unmap call to use
the correct SKB data buffer size.
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 facc9699
@@ -309,12 +309,15 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
 	rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
 		     MLX5E_SW2HW_MTU(priv->netdev->mtu);
+	rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
 
 	for (i = 0; i < wq_sz; i++) {
 		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+		u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
 
 		wqe->data.lkey       = c->mkey_be;
-		wqe->data.byte_count = cpu_to_be32(rq->wqe_sz);
+		wqe->data.byte_count =
+			cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
 	}
 
 	rq->pdev = c->pdev;
...
@@ -45,11 +45,9 @@ static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
 	if (unlikely(!skb))
 		return -ENOMEM;
 
-	skb_reserve(skb, MLX5E_NET_IP_ALIGN);
-
 	dma_addr = dma_map_single(rq->pdev,
 				  /* hw start padding */
-				  skb->data - MLX5E_NET_IP_ALIGN,
+				  skb->data,
 				  /* hw end padding */
 				  rq->wqe_sz,
 				  DMA_FROM_DEVICE);
@@ -57,6 +55,8 @@ static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
 	if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
 		goto err_free_skb;
 
+	skb_reserve(skb, MLX5E_NET_IP_ALIGN);
+
 	*((dma_addr_t *)skb->cb) = dma_addr;
 	wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
@@ -217,7 +217,7 @@ bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 		dma_unmap_single(rq->pdev,
 				 *((dma_addr_t *)skb->cb),
-				 skb_end_offset(skb),
+				 rq->wqe_sz,
 				 DMA_FROM_DEVICE);
 
 		if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
...
@@ -131,6 +131,10 @@ enum {
 	MLX5_INLINE_SEG = 0x80000000,
 };
 
+enum {
+	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
+};
+
 enum {
 	MLX5_MIN_PKEY_TABLE_SIZE = 128,
 	MLX5_MAX_LOG_PKEY_TABLE  = 5,
...
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册