提交 c4655761 编写于 作者: M Magnus Karlsson 提交者: Daniel Borkmann

xsk: i40e: ice: ixgbe: mlx5: Rename xsk zero-copy driver interfaces

Rename the AF_XDP zero-copy driver interface functions to better
reflect what they do after the replacement of umems with buffer
pools in the previous commit. Mostly it is about replacing the
umem name from the function names with xsk_buff and also have
them take a buffer pool pointer instead of a umem. The
various ring functions have also been renamed in the process so
that they have the same naming convention as the internal
functions in xsk_queue.h. This is so that it will be clearer what
they do and also for consistency.
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-3-git-send-email-magnus.karlsson@intel.com
上级 1742b3d5
...@@ -3138,7 +3138,7 @@ static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring) ...@@ -3138,7 +3138,7 @@ static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps)) if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
return NULL; return NULL;
return xdp_get_xsk_pool_from_qid(ring->vsi->netdev, qid); return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
} }
/** /**
...@@ -3286,7 +3286,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ...@@ -3286,7 +3286,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
if (ret) if (ret)
return ret; return ret;
ring->rx_buf_len = ring->rx_buf_len =
xsk_umem_get_rx_frame_size(ring->xsk_pool->umem); xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on /* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that * multiple buffers, thus letting us skip that
* handling in the fast-path. * handling in the fast-path.
...@@ -3370,7 +3370,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ...@@ -3370,7 +3370,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
writel(0, ring->tail); writel(0, ring->tail);
if (ring->xsk_pool) { if (ring->xsk_pool) {
xsk_buff_set_rxq_info(ring->xsk_pool->umem, &ring->xdp_rxq); xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)); ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
} else { } else {
ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
......
...@@ -55,8 +55,7 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi, ...@@ -55,8 +55,7 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
qid >= netdev->real_num_tx_queues) qid >= netdev->real_num_tx_queues)
return -EINVAL; return -EINVAL;
err = xsk_buff_dma_map(pool->umem, &vsi->back->pdev->dev, err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
I40E_RX_DMA_ATTR);
if (err) if (err)
return err; return err;
...@@ -97,7 +96,7 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid) ...@@ -97,7 +96,7 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
bool if_running; bool if_running;
int err; int err;
pool = xdp_get_xsk_pool_from_qid(netdev, qid); pool = xsk_get_pool_from_qid(netdev, qid);
if (!pool) if (!pool)
return -EINVAL; return -EINVAL;
...@@ -110,7 +109,7 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid) ...@@ -110,7 +109,7 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
} }
clear_bit(qid, vsi->af_xdp_zc_qps); clear_bit(qid, vsi->af_xdp_zc_qps);
xsk_buff_dma_unmap(pool->umem, I40E_RX_DMA_ATTR); xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
if (if_running) { if (if_running) {
err = i40e_queue_pair_enable(vsi, qid); err = i40e_queue_pair_enable(vsi, qid);
...@@ -196,7 +195,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count) ...@@ -196,7 +195,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
rx_desc = I40E_RX_DESC(rx_ring, ntu); rx_desc = I40E_RX_DESC(rx_ring, ntu);
bi = i40e_rx_bi(rx_ring, ntu); bi = i40e_rx_bi(rx_ring, ntu);
do { do {
xdp = xsk_buff_alloc(rx_ring->xsk_pool->umem); xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!xdp) { if (!xdp) {
ok = false; ok = false;
goto no_buffers; goto no_buffers;
...@@ -363,11 +362,11 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) ...@@ -363,11 +362,11 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
i40e_finalize_xdp_rx(rx_ring, xdp_xmit); i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
if (xsk_umem_uses_need_wakeup(rx_ring->xsk_pool->umem)) { if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
xsk_set_rx_need_wakeup(rx_ring->xsk_pool->umem); xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else else
xsk_clear_rx_need_wakeup(rx_ring->xsk_pool->umem); xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets; return (int)total_rx_packets;
} }
...@@ -390,12 +389,11 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) ...@@ -390,12 +389,11 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
dma_addr_t dma; dma_addr_t dma;
while (budget-- > 0) { while (budget-- > 0) {
if (!xsk_umem_consume_tx(xdp_ring->xsk_pool->umem, &desc)) if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
break; break;
dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool->umem, dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
desc.addr); xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool->umem, dma,
desc.len); desc.len);
tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use]; tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
...@@ -422,7 +420,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) ...@@ -422,7 +420,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
I40E_TXD_QW1_CMD_SHIFT); I40E_TXD_QW1_CMD_SHIFT);
i40e_xdp_ring_update_tail(xdp_ring); i40e_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_pool->umem); xsk_tx_release(xdp_ring->xsk_pool);
i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes); i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes);
} }
...@@ -494,13 +492,13 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring) ...@@ -494,13 +492,13 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
tx_ring->next_to_clean -= tx_ring->count; tx_ring->next_to_clean -= tx_ring->count;
if (xsk_frames) if (xsk_frames)
xsk_umem_complete_tx(bp->umem, xsk_frames); xsk_tx_completed(bp, xsk_frames);
i40e_arm_wb(tx_ring, vsi, completed_frames); i40e_arm_wb(tx_ring, vsi, completed_frames);
out_xmit: out_xmit:
if (xsk_umem_uses_need_wakeup(tx_ring->xsk_pool->umem)) if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
xsk_set_tx_need_wakeup(tx_ring->xsk_pool->umem); xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring)); return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
} }
...@@ -591,7 +589,7 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring) ...@@ -591,7 +589,7 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
} }
if (xsk_frames) if (xsk_frames)
xsk_umem_complete_tx(bp->umem, xsk_frames); xsk_tx_completed(bp, xsk_frames);
} }
/** /**
...@@ -607,7 +605,7 @@ bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi) ...@@ -607,7 +605,7 @@ bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
int i; int i;
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
if (xdp_get_xsk_pool_from_qid(netdev, i)) if (xsk_get_pool_from_qid(netdev, i))
return true; return true;
} }
......
...@@ -313,7 +313,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring) ...@@ -313,7 +313,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len = ring->rx_buf_len =
xsk_umem_get_rx_frame_size(ring->xsk_pool->umem); xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on /* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that * multiple buffers, thus letting us skip that
* handling in the fast-path. * handling in the fast-path.
...@@ -324,7 +324,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring) ...@@ -324,7 +324,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
NULL); NULL);
if (err) if (err)
return err; return err;
xsk_buff_set_rxq_info(ring->xsk_pool->umem, &ring->xdp_rxq); xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index); ring->q_index);
...@@ -418,7 +418,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring) ...@@ -418,7 +418,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
writel(0, ring->tail); writel(0, ring->tail);
if (ring->xsk_pool) { if (ring->xsk_pool) {
if (!xsk_buff_can_alloc(ring->xsk_pool->umem, num_bufs)) { if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n", dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
num_bufs, ring->q_index); num_bufs, ring->q_index);
dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n"); dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
......
...@@ -311,7 +311,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid) ...@@ -311,7 +311,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
!vsi->xsk_pools[qid]) !vsi->xsk_pools[qid])
return -EINVAL; return -EINVAL;
xsk_buff_dma_unmap(vsi->xsk_pools[qid]->umem, ICE_RX_DMA_ATTR); xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
ice_xsk_remove_pool(vsi, qid); ice_xsk_remove_pool(vsi, qid);
return 0; return 0;
...@@ -348,7 +348,7 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) ...@@ -348,7 +348,7 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
vsi->xsk_pools[qid] = pool; vsi->xsk_pools[qid] = pool;
vsi->num_xsk_pools_used++; vsi->num_xsk_pools_used++;
err = xsk_buff_dma_map(vsi->xsk_pools[qid]->umem, ice_pf_to_dev(vsi->back), err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
ICE_RX_DMA_ATTR); ICE_RX_DMA_ATTR);
if (err) if (err)
return err; return err;
...@@ -425,7 +425,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) ...@@ -425,7 +425,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
rx_buf = &rx_ring->rx_buf[ntu]; rx_buf = &rx_ring->rx_buf[ntu];
do { do {
rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool->umem); rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!rx_buf->xdp) { if (!rx_buf->xdp) {
ret = true; ret = true;
break; break;
...@@ -645,11 +645,11 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) ...@@ -645,11 +645,11 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
ice_finalize_xdp_rx(rx_ring, xdp_xmit); ice_finalize_xdp_rx(rx_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
if (xsk_umem_uses_need_wakeup(rx_ring->xsk_pool->umem)) { if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
xsk_set_rx_need_wakeup(rx_ring->xsk_pool->umem); xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else else
xsk_clear_rx_need_wakeup(rx_ring->xsk_pool->umem); xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets; return (int)total_rx_packets;
} }
...@@ -682,11 +682,11 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget) ...@@ -682,11 +682,11 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use]; tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
if (!xsk_umem_consume_tx(xdp_ring->xsk_pool->umem, &desc)) if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
break; break;
dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool->umem, desc.addr); dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool->umem, dma, xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
desc.len); desc.len);
tx_buf->bytecount = desc.len; tx_buf->bytecount = desc.len;
...@@ -703,7 +703,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget) ...@@ -703,7 +703,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) { if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring); ice_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_pool->umem); xsk_tx_release(xdp_ring->xsk_pool);
} }
return budget > 0 && work_done; return budget > 0 && work_done;
...@@ -777,10 +777,10 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget) ...@@ -777,10 +777,10 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
xdp_ring->next_to_clean = ntc; xdp_ring->next_to_clean = ntc;
if (xsk_frames) if (xsk_frames)
xsk_umem_complete_tx(xdp_ring->xsk_pool->umem, xsk_frames); xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_pool->umem)) if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
xsk_set_tx_need_wakeup(xdp_ring->xsk_pool->umem); xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes); ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK); xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
...@@ -896,5 +896,5 @@ void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring) ...@@ -896,5 +896,5 @@ void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
} }
if (xsk_frames) if (xsk_frames)
xsk_umem_complete_tx(xdp_ring->xsk_pool->umem, xsk_frames); xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
} }
...@@ -3714,7 +3714,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, ...@@ -3714,7 +3714,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
/* configure the packet buffer length */ /* configure the packet buffer length */
if (rx_ring->xsk_pool) { if (rx_ring->xsk_pool) {
u32 xsk_buf_len = xsk_umem_get_rx_frame_size(rx_ring->xsk_pool->umem); u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
/* If the MAC support setting RXDCTL.RLPML, the /* If the MAC support setting RXDCTL.RLPML, the
* SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
...@@ -4064,7 +4064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, ...@@ -4064,7 +4064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, MEM_TYPE_XSK_BUFF_POOL,
NULL)); NULL));
xsk_buff_set_rxq_info(ring->xsk_pool->umem, &ring->xdp_rxq); xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
} else { } else {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL)); MEM_TYPE_PAGE_SHARED, NULL));
...@@ -4120,7 +4120,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, ...@@ -4120,7 +4120,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
} }
if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) { if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
u32 xsk_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_pool->umem); u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN); IXGBE_RXDCTL_RLPML_EN);
......
...@@ -17,7 +17,7 @@ struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter, ...@@ -17,7 +17,7 @@ struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps)) if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
return NULL; return NULL;
return xdp_get_xsk_pool_from_qid(adapter->netdev, qid); return xsk_get_pool_from_qid(adapter->netdev, qid);
} }
static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter, static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
...@@ -35,7 +35,7 @@ static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter, ...@@ -35,7 +35,7 @@ static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
qid >= netdev->real_num_tx_queues) qid >= netdev->real_num_tx_queues)
return -EINVAL; return -EINVAL;
err = xsk_buff_dma_map(pool->umem, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR); err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
if (err) if (err)
return err; return err;
...@@ -64,7 +64,7 @@ static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid) ...@@ -64,7 +64,7 @@ static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
struct xsk_buff_pool *pool; struct xsk_buff_pool *pool;
bool if_running; bool if_running;
pool = xdp_get_xsk_pool_from_qid(adapter->netdev, qid); pool = xsk_get_pool_from_qid(adapter->netdev, qid);
if (!pool) if (!pool)
return -EINVAL; return -EINVAL;
...@@ -75,7 +75,7 @@ static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid) ...@@ -75,7 +75,7 @@ static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
ixgbe_txrx_ring_disable(adapter, qid); ixgbe_txrx_ring_disable(adapter, qid);
clear_bit(qid, adapter->af_xdp_zc_qps); clear_bit(qid, adapter->af_xdp_zc_qps);
xsk_buff_dma_unmap(pool->umem, IXGBE_RX_DMA_ATTR); xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
if (if_running) if (if_running)
ixgbe_txrx_ring_enable(adapter, qid); ixgbe_txrx_ring_enable(adapter, qid);
...@@ -150,7 +150,7 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count) ...@@ -150,7 +150,7 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
i -= rx_ring->count; i -= rx_ring->count;
do { do {
bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool->umem); bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!bi->xdp) { if (!bi->xdp) {
ok = false; ok = false;
break; break;
...@@ -345,11 +345,11 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, ...@@ -345,11 +345,11 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes; q_vector->rx.total_bytes += total_rx_bytes;
if (xsk_umem_uses_need_wakeup(rx_ring->xsk_pool->umem)) { if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
xsk_set_rx_need_wakeup(rx_ring->xsk_pool->umem); xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else else
xsk_clear_rx_need_wakeup(rx_ring->xsk_pool->umem); xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets; return (int)total_rx_packets;
} }
...@@ -389,11 +389,11 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) ...@@ -389,11 +389,11 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
break; break;
} }
if (!xsk_umem_consume_tx(pool->umem, &desc)) if (!xsk_tx_peek_desc(pool, &desc))
break; break;
dma = xsk_buff_raw_get_dma(pool->umem, desc.addr); dma = xsk_buff_raw_get_dma(pool, desc.addr);
xsk_buff_raw_dma_sync_for_device(pool->umem, dma, desc.len); xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use]; tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
tx_bi->bytecount = desc.len; tx_bi->bytecount = desc.len;
...@@ -419,7 +419,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) ...@@ -419,7 +419,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
if (tx_desc) { if (tx_desc) {
ixgbe_xdp_ring_update_tail(xdp_ring); ixgbe_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(pool->umem); xsk_tx_release(pool);
} }
return !!budget && work_done; return !!budget && work_done;
...@@ -485,10 +485,10 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, ...@@ -485,10 +485,10 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
q_vector->tx.total_packets += total_packets; q_vector->tx.total_packets += total_packets;
if (xsk_frames) if (xsk_frames)
xsk_umem_complete_tx(pool->umem, xsk_frames); xsk_tx_completed(pool, xsk_frames);
if (xsk_umem_uses_need_wakeup(pool->umem)) if (xsk_uses_need_wakeup(pool))
xsk_set_tx_need_wakeup(pool->umem); xsk_set_tx_need_wakeup(pool);
return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
} }
...@@ -547,5 +547,5 @@ void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring) ...@@ -547,5 +547,5 @@ void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
} }
if (xsk_frames) if (xsk_frames)
xsk_umem_complete_tx(pool->umem, xsk_frames); xsk_tx_completed(pool, xsk_frames);
} }
...@@ -445,7 +445,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) ...@@ -445,7 +445,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
if (xsk_frames) if (xsk_frames)
xsk_umem_complete_tx(sq->xsk_pool->umem, xsk_frames); xsk_tx_completed(sq->xsk_pool, xsk_frames);
sq->stats->cqes += i; sq->stats->cqes += i;
...@@ -475,7 +475,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) ...@@ -475,7 +475,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
} }
if (xsk_frames) if (xsk_frames)
xsk_umem_complete_tx(sq->xsk_pool->umem, xsk_frames); xsk_tx_completed(sq->xsk_pool, xsk_frames);
} }
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
......
...@@ -11,13 +11,13 @@ static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv, ...@@ -11,13 +11,13 @@ static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
{ {
struct device *dev = priv->mdev->device; struct device *dev = priv->mdev->device;
return xsk_buff_dma_map(pool->umem, dev, 0); return xsk_pool_dma_map(pool, dev, 0);
} }
static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv, static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv,
struct xsk_buff_pool *pool) struct xsk_buff_pool *pool)
{ {
return xsk_buff_dma_unmap(pool->umem, 0); return xsk_pool_dma_unmap(pool, 0);
} }
static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk) static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)
...@@ -64,14 +64,14 @@ static void mlx5e_xsk_remove_pool(struct mlx5e_xsk *xsk, u16 ix) ...@@ -64,14 +64,14 @@ static void mlx5e_xsk_remove_pool(struct mlx5e_xsk *xsk, u16 ix)
static bool mlx5e_xsk_is_pool_sane(struct xsk_buff_pool *pool) static bool mlx5e_xsk_is_pool_sane(struct xsk_buff_pool *pool)
{ {
return xsk_umem_get_headroom(pool->umem) <= 0xffff && return xsk_pool_get_headroom(pool) <= 0xffff &&
xsk_umem_get_chunk_size(pool->umem) <= 0xffff; xsk_pool_get_chunk_size(pool) <= 0xffff;
} }
void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk) void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk)
{ {
xsk->headroom = xsk_umem_get_headroom(pool->umem); xsk->headroom = xsk_pool_get_headroom(pool);
xsk->chunk_size = xsk_umem_get_chunk_size(pool->umem); xsk->chunk_size = xsk_pool_get_chunk_size(pool);
} }
static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv, static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
......
...@@ -22,7 +22,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, ...@@ -22,7 +22,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq, static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info) struct mlx5e_dma_info *dma_info)
{ {
dma_info->xsk = xsk_buff_alloc(rq->xsk_pool->umem); dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);
if (!dma_info->xsk) if (!dma_info->xsk)
return -ENOMEM; return -ENOMEM;
...@@ -38,13 +38,13 @@ static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq, ...@@ -38,13 +38,13 @@ static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err) static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
{ {
if (!xsk_umem_uses_need_wakeup(rq->xsk_pool->umem)) if (!xsk_uses_need_wakeup(rq->xsk_pool))
return alloc_err; return alloc_err;
if (unlikely(alloc_err)) if (unlikely(alloc_err))
xsk_set_rx_need_wakeup(rq->xsk_pool->umem); xsk_set_rx_need_wakeup(rq->xsk_pool);
else else
xsk_clear_rx_need_wakeup(rq->xsk_pool->umem); xsk_clear_rx_need_wakeup(rq->xsk_pool);
return false; return false;
} }
......
...@@ -87,7 +87,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget) ...@@ -87,7 +87,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
break; break;
} }
if (!xsk_umem_consume_tx(pool->umem, &desc)) { if (!xsk_tx_peek_desc(pool, &desc)) {
/* TX will get stuck until something wakes it up by /* TX will get stuck until something wakes it up by
* triggering NAPI. Currently it's expected that the * triggering NAPI. Currently it's expected that the
* application calls sendto() if there are consumed, but * application calls sendto() if there are consumed, but
...@@ -96,11 +96,11 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget) ...@@ -96,11 +96,11 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
break; break;
} }
xdptxd.dma_addr = xsk_buff_raw_get_dma(pool->umem, desc.addr); xdptxd.dma_addr = xsk_buff_raw_get_dma(pool, desc.addr);
xdptxd.data = xsk_buff_raw_get_data(pool->umem, desc.addr); xdptxd.data = xsk_buff_raw_get_data(pool, desc.addr);
xdptxd.len = desc.len; xdptxd.len = desc.len;
xsk_buff_raw_dma_sync_for_device(pool->umem, xdptxd.dma_addr, xdptxd.len); xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe, ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result); mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result);
...@@ -119,7 +119,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget) ...@@ -119,7 +119,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
mlx5e_xdp_mpwqe_complete(sq); mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xmit_xdp_doorbell(sq); mlx5e_xmit_xdp_doorbell(sq);
xsk_umem_consume_tx_done(pool->umem); xsk_tx_release(pool);
} }
return !(budget && work_done); return !(budget && work_done);
......
...@@ -15,13 +15,13 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget); ...@@ -15,13 +15,13 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget);
static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq) static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq)
{ {
if (!xsk_umem_uses_need_wakeup(sq->xsk_pool->umem)) if (!xsk_uses_need_wakeup(sq->xsk_pool))
return; return;
if (sq->pc != sq->cc) if (sq->pc != sq->cc)
xsk_clear_tx_need_wakeup(sq->xsk_pool->umem); xsk_clear_tx_need_wakeup(sq->xsk_pool);
else else
xsk_set_tx_need_wakeup(sq->xsk_pool->umem); xsk_set_tx_need_wakeup(sq->xsk_pool);
} }
#endif /* __MLX5_EN_XSK_TX_H__ */ #endif /* __MLX5_EN_XSK_TX_H__ */
...@@ -477,7 +477,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, ...@@ -477,7 +477,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
if (xsk) { if (xsk) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL); MEM_TYPE_XSK_BUFF_POOL, NULL);
xsk_buff_set_rxq_info(rq->xsk_pool->umem, &rq->xdp_rxq); xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
} else { } else {
/* Create a page_pool and register it with rxq */ /* Create a page_pool and register it with rxq */
pp_params.order = 0; pp_params.order = 0;
......
...@@ -407,7 +407,7 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk) ...@@ -407,7 +407,7 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
* allocating one-by-one, failing and moving frames to the * allocating one-by-one, failing and moving frames to the
* Reuse Ring. * Reuse Ring.
*/ */
if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool->umem, pages_desired))) if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired)))
return -ENOMEM; return -ENOMEM;
} }
...@@ -506,7 +506,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) ...@@ -506,7 +506,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
* one-by-one, failing and moving frames to the Reuse Ring. * one-by-one, failing and moving frames to the Reuse Ring.
*/ */
if (rq->xsk_pool && if (rq->xsk_pool &&
unlikely(!xsk_buff_can_alloc(rq->xsk_pool->umem, MLX5_MPWRQ_PAGES_PER_WQE))) { unlikely(!xsk_buff_can_alloc(rq->xsk_pool, MLX5_MPWRQ_PAGES_PER_WQE))) {
err = -ENOMEM; err = -ENOMEM;
goto err; goto err;
} }
......
...@@ -52,6 +52,7 @@ struct xdp_sock { ...@@ -52,6 +52,7 @@ struct xdp_sock {
struct net_device *dev; struct net_device *dev;
struct xdp_umem *umem; struct xdp_umem *umem;
struct list_head flush_node; struct list_head flush_node;
struct xsk_buff_pool *pool;
u16 queue_id; u16 queue_id;
bool zc; bool zc;
enum { enum {
......
...@@ -11,48 +11,50 @@ ...@@ -11,48 +11,50 @@
#ifdef CONFIG_XDP_SOCKETS #ifdef CONFIG_XDP_SOCKETS
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries); void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc); bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem); void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xdp_get_xsk_pool_from_qid(struct net_device *dev, struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
u16 queue_id); u16 queue_id);
void xsk_set_rx_need_wakeup(struct xdp_umem *umem); void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xdp_umem *umem); void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xdp_umem *umem); void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xdp_umem *umem); void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem); bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem) static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{ {
return XDP_PACKET_HEADROOM + umem->headroom; return XDP_PACKET_HEADROOM + pool->headroom;
} }
static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem) static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{ {
return umem->chunk_size; return pool->chunk_size;
} }
static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem) static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{ {
return xsk_umem_get_chunk_size(umem) - xsk_umem_get_headroom(umem); return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
} }
static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem, static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
struct xdp_rxq_info *rxq) struct xdp_rxq_info *rxq)
{ {
xp_set_rxq_info(umem->pool, rxq); xp_set_rxq_info(pool, rxq);
} }
static inline void xsk_buff_dma_unmap(struct xdp_umem *umem, static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs) unsigned long attrs)
{ {
xp_dma_unmap(umem->pool, attrs); xp_dma_unmap(pool, attrs);
} }
static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev, static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
unsigned long attrs) struct device *dev, unsigned long attrs)
{ {
return xp_dma_map(umem->pool, dev, attrs, umem->pgs, umem->npgs); struct xdp_umem *umem = pool->umem;
return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
} }
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp) static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
...@@ -69,14 +71,14 @@ static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp) ...@@ -69,14 +71,14 @@ static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
return xp_get_frame_dma(xskb); return xp_get_frame_dma(xskb);
} }
static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem) static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{ {
return xp_alloc(umem->pool); return xp_alloc(pool);
} }
static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count) static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{ {
return xp_can_alloc(umem->pool, count); return xp_can_alloc(pool, count);
} }
static inline void xsk_buff_free(struct xdp_buff *xdp) static inline void xsk_buff_free(struct xdp_buff *xdp)
...@@ -86,14 +88,15 @@ static inline void xsk_buff_free(struct xdp_buff *xdp) ...@@ -86,14 +88,15 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
xp_free(xskb); xp_free(xskb);
} }
static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr) static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
u64 addr)
{ {
return xp_raw_get_dma(umem->pool, addr); return xp_raw_get_dma(pool, addr);
} }
static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr) static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{ {
return xp_raw_get_data(umem->pool, addr); return xp_raw_get_data(pool, addr);
} }
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp) static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
...@@ -103,83 +106,83 @@ static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp) ...@@ -103,83 +106,83 @@ static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
xp_dma_sync_for_cpu(xskb); xp_dma_sync_for_cpu(xskb);
} }
static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem, static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_addr_t dma, dma_addr_t dma,
size_t size) size_t size)
{ {
xp_dma_sync_for_device(umem->pool, dma, size); xp_dma_sync_for_device(pool, dma, size);
} }
#else #else
static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries) static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{ {
} }
static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
struct xdp_desc *desc) struct xdp_desc *desc)
{ {
return false; return false;
} }
static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem) static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{ {
} }
static inline struct xsk_buff_pool * static inline struct xsk_buff_pool *
xdp_get_xsk_pool_from_qid(struct net_device *dev, u16 queue_id) xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{ {
return NULL; return NULL;
} }
static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem) static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{ {
} }
static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem) static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{ {
} }
static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem) static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{ {
} }
static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem) static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{ {
} }
static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem) static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{ {
return false; return false;
} }
static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem) static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{ {
return 0; return 0;
} }
static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem) static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{ {
return 0; return 0;
} }
static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem) static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{ {
return 0; return 0;
} }
static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem, static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
struct xdp_rxq_info *rxq) struct xdp_rxq_info *rxq)
{ {
} }
static inline void xsk_buff_dma_unmap(struct xdp_umem *umem, static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs) unsigned long attrs)
{ {
} }
static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev, static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
unsigned long attrs) struct device *dev, unsigned long attrs)
{ {
return 0; return 0;
} }
...@@ -194,12 +197,12 @@ static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp) ...@@ -194,12 +197,12 @@ static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
return 0; return 0;
} }
static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem) static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{ {
return NULL; return NULL;
} }
static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count) static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{ {
return false; return false;
} }
...@@ -208,12 +211,13 @@ static inline void xsk_buff_free(struct xdp_buff *xdp) ...@@ -208,12 +211,13 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
{ {
} }
static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr) static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
u64 addr)
{ {
return 0; return 0;
} }
static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr) static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{ {
return NULL; return NULL;
} }
...@@ -222,7 +226,7 @@ static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp) ...@@ -222,7 +226,7 @@ static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{ {
} }
static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem, static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_addr_t dma, dma_addr_t dma,
size_t size) size_t size)
{ {
......
...@@ -223,7 +223,7 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info) ...@@ -223,7 +223,7 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
from_channel = channels.combined_count + from_channel = channels.combined_count +
min(channels.rx_count, channels.tx_count); min(channels.rx_count, channels.tx_count);
for (i = from_channel; i < old_total; i++) for (i = from_channel; i < old_total; i++)
if (xdp_get_xsk_pool_from_qid(dev, i)) { if (xsk_get_pool_from_qid(dev, i)) {
GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing zerocopy AF_XDP sockets"); GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing zerocopy AF_XDP sockets");
return -EINVAL; return -EINVAL;
} }
......
...@@ -1706,7 +1706,7 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev, ...@@ -1706,7 +1706,7 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
min(channels.rx_count, channels.tx_count); min(channels.rx_count, channels.tx_count);
to_channel = curr.combined_count + max(curr.rx_count, curr.tx_count); to_channel = curr.combined_count + max(curr.rx_count, curr.tx_count);
for (i = from_channel; i < to_channel; i++) for (i = from_channel; i < to_channel; i++)
if (xdp_get_xsk_pool_from_qid(dev, i)) if (xsk_get_pool_from_qid(dev, i))
return -EINVAL; return -EINVAL;
ret = dev->ethtool_ops->set_channels(dev, &channels); ret = dev->ethtool_ops->set_channels(dev, &channels);
......
...@@ -51,9 +51,9 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs) ...@@ -51,9 +51,9 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
* not know if the device has more tx queues than rx, or the opposite. * not know if the device has more tx queues than rx, or the opposite.
* This might also change during run time. * This might also change during run time.
*/ */
static int xdp_reg_xsk_pool_at_qid(struct net_device *dev, static int xsk_reg_pool_at_qid(struct net_device *dev,
struct xsk_buff_pool *pool, struct xsk_buff_pool *pool,
u16 queue_id) u16 queue_id)
{ {
if (queue_id >= max_t(unsigned int, if (queue_id >= max_t(unsigned int,
dev->real_num_rx_queues, dev->real_num_rx_queues,
...@@ -68,8 +68,8 @@ static int xdp_reg_xsk_pool_at_qid(struct net_device *dev, ...@@ -68,8 +68,8 @@ static int xdp_reg_xsk_pool_at_qid(struct net_device *dev,
return 0; return 0;
} }
struct xsk_buff_pool *xdp_get_xsk_pool_from_qid(struct net_device *dev, struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
u16 queue_id) u16 queue_id)
{ {
if (queue_id < dev->real_num_rx_queues) if (queue_id < dev->real_num_rx_queues)
return dev->_rx[queue_id].pool; return dev->_rx[queue_id].pool;
...@@ -78,9 +78,9 @@ struct xsk_buff_pool *xdp_get_xsk_pool_from_qid(struct net_device *dev, ...@@ -78,9 +78,9 @@ struct xsk_buff_pool *xdp_get_xsk_pool_from_qid(struct net_device *dev,
return NULL; return NULL;
} }
EXPORT_SYMBOL(xdp_get_xsk_pool_from_qid); EXPORT_SYMBOL(xsk_get_pool_from_qid);
static void xdp_clear_xsk_pool_at_qid(struct net_device *dev, u16 queue_id) static void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{ {
if (queue_id < dev->real_num_rx_queues) if (queue_id < dev->real_num_rx_queues)
dev->_rx[queue_id].pool = NULL; dev->_rx[queue_id].pool = NULL;
...@@ -103,10 +103,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, ...@@ -103,10 +103,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
if (force_zc && force_copy) if (force_zc && force_copy)
return -EINVAL; return -EINVAL;
if (xdp_get_xsk_pool_from_qid(dev, queue_id)) if (xsk_get_pool_from_qid(dev, queue_id))
return -EBUSY; return -EBUSY;
err = xdp_reg_xsk_pool_at_qid(dev, umem->pool, queue_id); err = xsk_reg_pool_at_qid(dev, umem->pool, queue_id);
if (err) if (err)
return err; return err;
...@@ -119,7 +119,7 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, ...@@ -119,7 +119,7 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
* Also for supporting drivers that do not implement this * Also for supporting drivers that do not implement this
* feature. They will always have to call sendto(). * feature. They will always have to call sendto().
*/ */
xsk_set_tx_need_wakeup(umem); xsk_set_tx_need_wakeup(umem->pool);
} }
dev_hold(dev); dev_hold(dev);
...@@ -148,7 +148,7 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, ...@@ -148,7 +148,7 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
if (!force_zc) if (!force_zc)
err = 0; /* fallback to copy mode */ err = 0; /* fallback to copy mode */
if (err) if (err)
xdp_clear_xsk_pool_at_qid(dev, queue_id); xsk_clear_pool_at_qid(dev, queue_id);
return err; return err;
} }
...@@ -173,7 +173,7 @@ void xdp_umem_clear_dev(struct xdp_umem *umem) ...@@ -173,7 +173,7 @@ void xdp_umem_clear_dev(struct xdp_umem *umem)
WARN(1, "failed to disable umem!\n"); WARN(1, "failed to disable umem!\n");
} }
xdp_clear_xsk_pool_at_qid(umem->dev, umem->queue_id); xsk_clear_pool_at_qid(umem->dev, umem->queue_id);
dev_put(umem->dev); dev_put(umem->dev);
umem->dev = NULL; umem->dev = NULL;
......
...@@ -39,8 +39,10 @@ bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs) ...@@ -39,8 +39,10 @@ bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
READ_ONCE(xs->umem->fq); READ_ONCE(xs->umem->fq);
} }
void xsk_set_rx_need_wakeup(struct xdp_umem *umem) void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{ {
struct xdp_umem *umem = pool->umem;
if (umem->need_wakeup & XDP_WAKEUP_RX) if (umem->need_wakeup & XDP_WAKEUP_RX)
return; return;
...@@ -49,8 +51,9 @@ void xsk_set_rx_need_wakeup(struct xdp_umem *umem) ...@@ -49,8 +51,9 @@ void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
} }
EXPORT_SYMBOL(xsk_set_rx_need_wakeup); EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
void xsk_set_tx_need_wakeup(struct xdp_umem *umem) void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{ {
struct xdp_umem *umem = pool->umem;
struct xdp_sock *xs; struct xdp_sock *xs;
if (umem->need_wakeup & XDP_WAKEUP_TX) if (umem->need_wakeup & XDP_WAKEUP_TX)
...@@ -66,8 +69,10 @@ void xsk_set_tx_need_wakeup(struct xdp_umem *umem) ...@@ -66,8 +69,10 @@ void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
} }
EXPORT_SYMBOL(xsk_set_tx_need_wakeup); EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
void xsk_clear_rx_need_wakeup(struct xdp_umem *umem) void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{ {
struct xdp_umem *umem = pool->umem;
if (!(umem->need_wakeup & XDP_WAKEUP_RX)) if (!(umem->need_wakeup & XDP_WAKEUP_RX))
return; return;
...@@ -76,8 +81,9 @@ void xsk_clear_rx_need_wakeup(struct xdp_umem *umem) ...@@ -76,8 +81,9 @@ void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
} }
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup); EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
void xsk_clear_tx_need_wakeup(struct xdp_umem *umem) void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{ {
struct xdp_umem *umem = pool->umem;
struct xdp_sock *xs; struct xdp_sock *xs;
if (!(umem->need_wakeup & XDP_WAKEUP_TX)) if (!(umem->need_wakeup & XDP_WAKEUP_TX))
...@@ -93,11 +99,11 @@ void xsk_clear_tx_need_wakeup(struct xdp_umem *umem) ...@@ -93,11 +99,11 @@ void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
} }
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup); EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem) bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{ {
return umem->flags & XDP_UMEM_USES_NEED_WAKEUP; return pool->umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
} }
EXPORT_SYMBOL(xsk_umem_uses_need_wakeup); EXPORT_SYMBOL(xsk_uses_need_wakeup);
void xp_release(struct xdp_buff_xsk *xskb) void xp_release(struct xdp_buff_xsk *xskb)
{ {
...@@ -155,12 +161,12 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len, ...@@ -155,12 +161,12 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
struct xdp_buff *xsk_xdp; struct xdp_buff *xsk_xdp;
int err; int err;
if (len > xsk_umem_get_rx_frame_size(xs->umem)) { if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
xs->rx_dropped++; xs->rx_dropped++;
return -ENOSPC; return -ENOSPC;
} }
xsk_xdp = xsk_buff_alloc(xs->umem); xsk_xdp = xsk_buff_alloc(xs->pool);
if (!xsk_xdp) { if (!xsk_xdp) {
xs->rx_dropped++; xs->rx_dropped++;
return -ENOSPC; return -ENOSPC;
...@@ -249,27 +255,28 @@ void __xsk_map_flush(void) ...@@ -249,27 +255,28 @@ void __xsk_map_flush(void)
} }
} }
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries) void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{ {
xskq_prod_submit_n(umem->cq, nb_entries); xskq_prod_submit_n(pool->umem->cq, nb_entries);
} }
EXPORT_SYMBOL(xsk_umem_complete_tx); EXPORT_SYMBOL(xsk_tx_completed);
void xsk_umem_consume_tx_done(struct xdp_umem *umem) void xsk_tx_release(struct xsk_buff_pool *pool)
{ {
struct xdp_sock *xs; struct xdp_sock *xs;
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) { list_for_each_entry_rcu(xs, &pool->umem->xsk_tx_list, list) {
__xskq_cons_release(xs->tx); __xskq_cons_release(xs->tx);
xs->sk.sk_write_space(&xs->sk); xs->sk.sk_write_space(&xs->sk);
} }
rcu_read_unlock(); rcu_read_unlock();
} }
EXPORT_SYMBOL(xsk_umem_consume_tx_done); EXPORT_SYMBOL(xsk_tx_release);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc) bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{ {
struct xdp_umem *umem = pool->umem;
struct xdp_sock *xs; struct xdp_sock *xs;
rcu_read_lock(); rcu_read_lock();
...@@ -296,7 +303,7 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc) ...@@ -296,7 +303,7 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
rcu_read_unlock(); rcu_read_unlock();
return false; return false;
} }
EXPORT_SYMBOL(xsk_umem_consume_tx); EXPORT_SYMBOL(xsk_tx_peek_desc);
static int xsk_wakeup(struct xdp_sock *xs, u8 flags) static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{ {
...@@ -359,7 +366,7 @@ static int xsk_generic_xmit(struct sock *sk) ...@@ -359,7 +366,7 @@ static int xsk_generic_xmit(struct sock *sk)
skb_put(skb, len); skb_put(skb, len);
addr = desc.addr; addr = desc.addr;
buffer = xsk_buff_raw_get_data(xs->umem, addr); buffer = xsk_buff_raw_get_data(xs->pool, addr);
err = skb_store_bits(skb, 0, buffer, len); err = skb_store_bits(skb, 0, buffer, len);
/* This is the backpressure mechanism for the Tx path. /* This is the backpressure mechanism for the Tx path.
* Reserve space in the completion queue and only proceed * Reserve space in the completion queue and only proceed
...@@ -762,6 +769,8 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname, ...@@ -762,6 +769,8 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
return PTR_ERR(umem); return PTR_ERR(umem);
} }
xs->pool = umem->pool;
/* Make sure umem is ready before it can be seen by others */ /* Make sure umem is ready before it can be seen by others */
smp_wmb(); smp_wmb();
WRITE_ONCE(xs->umem, umem); WRITE_ONCE(xs->umem, umem);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册