Commit dcb421f4 authored by David S. Miller

Merge branch 'mlx5-xdp-perf-optimizations'

Saeed Mahameed says:

====================
Mellanox mlx5e XDP performance optimization

This series provides some performance optimizations for the mlx5e
driver, especially for XDP TX flows.

The 1st patch is a simple change of rmb() to dma_rmb() in the CQE fetch routine,
which shows a huge gain for both RX and TX packet rates.
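
For reference, the ordering pattern in question is sketched below. This is a
minimal, hypothetical polling routine (the ring and CQE types are placeholders,
not the driver's structures); it only illustrates why dma_rmb() is enough here.
The actual one-line change is in the mlx5e_get_cqe() hunk in the diff below.

	/* Minimal sketch, not the driver code: poll one entry from a completion
	 * ring that the device writes through coherent DMA.  Both reads target
	 * that DMA memory, so dma_rmb() is sufficient to order them; the heavier
	 * rmb() additionally orders MMIO reads, which is not needed on this path.
	 * Needs <linux/types.h> and <asm/barrier.h>.
	 */
	struct sketch_cqe {			/* hypothetical CQE layout */
		u8 data[63];
		u8 op_own;			/* ownership bit in the last byte */
	};

	struct sketch_cq {			/* hypothetical ring state */
		struct sketch_cqe *entries;
		u32 ci;
		u32 mask;
		u8 sw_owner_val;
	};

	static struct sketch_cqe *sketch_poll_one(struct sketch_cq *cq)
	{
		struct sketch_cqe *cqe = &cq->entries[cq->ci & cq->mask];

		if ((cqe->op_own & 1) != cq->sw_owner_val)
			return NULL;		/* device has not completed this entry yet */

		/* ensure cqe content is read after the cqe ownership bit */
		dma_rmb();

		return cqe;
	}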

The 2nd patch removes the write-combining logic from the driver TX handler
and simplifies the TX logic while improving TX CPU utilization.
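
The resulting doorbell path is easier to read outside the diff. The helper
below is reassembled verbatim from the en.h hunk further down (mlx5e_notify_hw
as added by this series); only the surrounding comments are editorial.

	/* Reassembled from the diff below: the post-series doorbell helper.
	 * One dma_wmb() publishes the WQE, the doorbell record is updated, and a
	 * single 64-bit MMIO write rings the doorbell -- no blueflame buffer, no
	 * bf_offset toggling, no extra flushing wmb().
	 */
	static inline
	void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
			     void __iomem *uar_map,
			     struct mlx5_wqe_ctrl_seg *ctrl)
	{
		ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

		/* ensure wqe is visible to device before updating doorbell record */
		dma_wmb();

		*wq->db = cpu_to_be32(pc);

		/* ensure doorbell record is visible to device before ringing the
		 * doorbell
		 */
		wmb();

		mlx5_write64((__be32 *)ctrl, uar_map, NULL);
	}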

All other patches combined refactor the driver TX flows to allow some
significant XDP TX improvements.
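
One example of what the refactoring buys: with a dedicated struct mlx5e_xdpsq,
each XDP_TX packet now consumes a single WQEBB and only marks the queue as
pending, and the RX poll loop rings the doorbell once per NAPI batch. The
fragments below are reassembled from the en_rx.c hunks further down; the
comments are editorial.

	/* Reassembled from the diff below (en_rx.c): ring the XDP SQ doorbell
	 * for everything posted so far, using the last posted WQE's control seg.
	 */
	static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
	{
		struct mlx5_wq_cyc *wq = &sq->wq;
		struct mlx5e_tx_wqe *wqe;
		u16 pi = (sq->pc - 1) & wq->sz_m1; /* last pi */

		wqe = mlx5_wq_cyc_get_wqe(wq, pi);

		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
	}

	/* In mlx5e_xmit_xdp_frame() the fast path only advances the producer
	 * and sets the pending flag:
	 *
	 *	sq->db.di[pi] = *di;
	 *	sq->pc++;
	 *	sq->db.doorbell = true;
	 *
	 * and mlx5e_poll_rx_cq() flushes once per NAPI batch:
	 *
	 *	if (xdpsq->db.doorbell) {
	 *		mlx5e_xmit_xdp_doorbell(xdpsq);
	 *		xdpsq->db.doorbell = false;
	 *	}
	 */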

More details and per-patch performance numbers (each measured against the
preceding patch) can be found in the individual commit messages.

Overall performance improvements:
  System: Intel(R) Xeon(R) CPU E5-2620 v3 @ 2.40GHz

Test case                   Baseline      Now      improvement
---------------------------------------------------------------
TX packets (24 threads)     45Mpps        54Mpps      20%
TC stack Drop (1 core)      3.45Mpps      3.6Mpps     5%
XDP Drop      (1 core)      14Mpps        16.9Mpps    20%
XDP TX        (1 core)      10.4Mpps      13.7Mpps    31%
====================
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -111,18 +111,13 @@
 #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
 #define MLX5E_TX_CQ_POLL_BUDGET 128
 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
-#define MLX5E_SQ_BF_BUDGET 16

 #define MLX5E_ICOSQ_MAX_WQEBBS \
	(DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))

 #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_IHS_DS_COUNT \
-	DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
 #define MLX5E_XDP_TX_DS_COUNT \
	((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
-#define MLX5E_XDP_TX_WQEBBS \
-	DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)

 #define MLX5E_NUM_MAIN_GROUPS 9
@@ -298,19 +293,159 @@ struct mlx5e_cq {
	struct mlx5_frag_wq_ctrl wq_ctrl;
 } ____cacheline_aligned_in_smp;

-struct mlx5e_rq;
-typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
-				       struct mlx5_cqe64 *cqe);
-typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
-				  u16 ix);
-
-typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+struct mlx5e_tx_wqe_info {
+	u32 num_bytes;
+	u8  num_wqebbs;
+	u8  num_dma;
+};
+
+enum mlx5e_dma_map_type {
+	MLX5E_DMA_MAP_SINGLE,
+	MLX5E_DMA_MAP_PAGE
+};
+
+struct mlx5e_sq_dma {
+	dma_addr_t addr;
+	u32 size;
+	enum mlx5e_dma_map_type type;
+};
+
+enum {
+	MLX5E_SQ_STATE_ENABLED,
+};
+
+struct mlx5e_sq_wqe_info {
+	u8 opcode;
+	u8 num_wqebbs;
+};
+
+struct mlx5e_txqsq {
+	/* data path */
+
+	/* dirtied @completion */
+	u16 cc;
+	u32 dma_fifo_cc;
+
+	/* dirtied @xmit */
+	u16 pc ____cacheline_aligned_in_smp;
+	u32 dma_fifo_pc;
+	struct mlx5e_sq_stats stats;
+
+	struct mlx5e_cq cq;
+
+	/* write@xmit, read@completion */
+	struct {
+		struct sk_buff **skb;
+		struct mlx5e_sq_dma *dma_fifo;
+		struct mlx5e_tx_wqe_info *wqe_info;
+	} db;
+
+	/* read only */
+	struct mlx5_wq_cyc wq;
+	u32 dma_fifo_mask;
+	void __iomem *uar_map;
+	struct netdev_queue *txq;
+	u32 sqn;
+	u16 max_inline;
+	u8 min_inline_mode;
+	u16 edge;
+	struct device *pdev;
+	struct mlx5e_tstamp *tstamp;
+	__be32 mkey_be;
+	unsigned long state;
+
+	/* control path */
+	struct mlx5_wq_ctrl wq_ctrl;
+	struct mlx5e_channel *channel;
+	int tc;
+	u32 rate_limit;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_xdpsq {
+	/* data path */
+
+	/* dirtied @rx completion */
+	u16 cc;
+	u16 pc;
+
+	struct mlx5e_cq cq;
+
+	/* write@xmit, read@completion */
+	struct {
+		struct mlx5e_dma_info *di;
+		bool doorbell;
+	} db;
+
+	/* read only */
+	struct mlx5_wq_cyc wq;
+	void __iomem *uar_map;
+	u32 sqn;
+	struct device *pdev;
+	__be32 mkey_be;
+	u8 min_inline_mode;
+	unsigned long state;
+
+	/* control path */
+	struct mlx5_wq_ctrl wq_ctrl;
+	struct mlx5e_channel *channel;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_icosq {
+	/* data path */
+
+	/* dirtied @completion */
+	u16 cc;
+
+	/* dirtied @xmit */
+	u16 pc ____cacheline_aligned_in_smp;
+	u32 dma_fifo_pc;
+	u16 prev_cc;
+
+	struct mlx5e_cq cq;
+
+	/* write@xmit, read@completion */
+	struct {
+		struct mlx5e_sq_wqe_info *ico_wqe;
+	} db;
+
+	/* read only */
+	struct mlx5_wq_cyc wq;
+	void __iomem *uar_map;
+	u32 sqn;
+	u16 edge;
+	struct device *pdev;
+	__be32 mkey_be;
+	unsigned long state;
+
+	/* control path */
+	struct mlx5_wq_ctrl wq_ctrl;
+	struct mlx5e_channel *channel;
+} ____cacheline_aligned_in_smp;
+
+static inline bool
+mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
+{
+	return (((wq->sz_m1 & (cc - pc)) >= n) || (cc == pc));
+}

 struct mlx5e_dma_info {
	struct page *page;
	dma_addr_t addr;
 };

+struct mlx5e_umr_dma_info {
+	__be64 *mtt;
+	dma_addr_t mtt_addr;
+	struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
+	struct mlx5e_umr_wqe wqe;
+};
+
+struct mlx5e_mpw_info {
+	struct mlx5e_umr_dma_info umr;
+	u16 consumed_strides;
+	u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+};
+
 struct mlx5e_rx_am_stats {
	int ppms; /* packets per msec */
	int epms; /* events per msec */
@@ -347,6 +482,11 @@ struct mlx5e_page_cache {
	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
 };

+struct mlx5e_rq;
+typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
+typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq*, struct mlx5e_rx_wqe*, u16);
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
+
 struct mlx5e_rq {
	/* data path */
	struct mlx5_wq_ll wq;
@@ -381,7 +521,10 @@ struct mlx5e_rq {
	u16 rx_headroom;

	struct mlx5e_rx_am am; /* Adaptive Moderation */
+
+	/* XDP */
	struct bpf_prog *xdp_prog;
+	struct mlx5e_xdpsq xdpsq;

	/* control */
	struct mlx5_wq_ctrl wq_ctrl;
@@ -394,114 +537,6 @@ struct mlx5e_rq {
	struct mlx5_core_mkey umr_mkey;
 } ____cacheline_aligned_in_smp;

-struct mlx5e_umr_dma_info {
-	__be64 *mtt;
-	dma_addr_t mtt_addr;
-	struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
-	struct mlx5e_umr_wqe wqe;
-};
-
-struct mlx5e_mpw_info {
-	struct mlx5e_umr_dma_info umr;
-	u16 consumed_strides;
-	u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
-};
-
-struct mlx5e_tx_wqe_info {
-	u32 num_bytes;
-	u8  num_wqebbs;
-	u8  num_dma;
-};
-
-enum mlx5e_dma_map_type {
-	MLX5E_DMA_MAP_SINGLE,
-	MLX5E_DMA_MAP_PAGE
-};
-
-struct mlx5e_sq_dma {
-	dma_addr_t addr;
-	u32 size;
-	enum mlx5e_dma_map_type type;
-};
-
-enum {
-	MLX5E_SQ_STATE_ENABLED,
-	MLX5E_SQ_STATE_BF_ENABLE,
-};
-
-struct mlx5e_sq_wqe_info {
-	u8 opcode;
-	u8 num_wqebbs;
-};
-
-enum mlx5e_sq_type {
-	MLX5E_SQ_TXQ,
-	MLX5E_SQ_ICO,
-	MLX5E_SQ_XDP
-};
-
-struct mlx5e_sq {
-	/* data path */
-
-	/* dirtied @completion */
-	u16 cc;
-	u32 dma_fifo_cc;
-
-	/* dirtied @xmit */
-	u16 pc ____cacheline_aligned_in_smp;
-	u32 dma_fifo_pc;
-	u16 bf_offset;
-	u16 prev_cc;
-	u8 bf_budget;
-	struct mlx5e_sq_stats stats;
-
-	struct mlx5e_cq cq;
-
-	/* pointers to per tx element info: write@xmit, read@completion */
-	union {
-		struct {
-			struct sk_buff **skb;
-			struct mlx5e_sq_dma *dma_fifo;
-			struct mlx5e_tx_wqe_info *wqe_info;
-		} txq;
-		struct mlx5e_sq_wqe_info *ico_wqe;
-		struct {
-			struct mlx5e_sq_wqe_info *wqe_info;
-			struct mlx5e_dma_info *di;
-			bool doorbell;
-		} xdp;
-	} db;
-
-	/* read only */
-	struct mlx5_wq_cyc wq;
-	u32 dma_fifo_mask;
-	void __iomem *uar_map;
-	struct netdev_queue *txq;
-	u32 sqn;
-	u16 bf_buf_size;
-	u16 max_inline;
-	u8 min_inline_mode;
-	u16 edge;
-	struct device *pdev;
-	struct mlx5e_tstamp *tstamp;
-	__be32 mkey_be;
-	unsigned long state;
-
-	/* control path */
-	struct mlx5_wq_ctrl wq_ctrl;
-	struct mlx5_sq_bfreg bfreg;
-	struct mlx5e_channel *channel;
-	int tc;
-	u32 rate_limit;
-	u8 type;
-} ____cacheline_aligned_in_smp;
-
-static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
-{
-	return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
-		(sq->cc == sq->pc));
-}
-
 enum channel_flags {
	MLX5E_CHANNEL_NAPI_SCHED = 1,
 };
@@ -509,9 +544,8 @@ enum channel_flags {
 struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq rq;
-	struct mlx5e_sq xdp_sq;
-	struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
-	struct mlx5e_sq icosq;   /* internal control operations */
+	struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
+	struct mlx5e_icosq icosq;   /* internal control operations */
	bool xdp;
	struct napi_struct napi;
	struct device *pdev;
@@ -693,7 +727,7 @@ struct mlx5e_profile {
 struct mlx5e_priv {
	/* priv data path fields - start */
-	struct mlx5e_sq **txq_to_sq_map;
+	struct mlx5e_txqsq **txq_to_sq_map;
	int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
	struct bpf_prog *xdp_prog;
	/* priv data path fields - end */
@@ -734,7 +768,6 @@ struct mlx5e_priv {

 void mlx5e_build_ptys2ethtool_map(void);

-void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -744,7 +777,9 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
 int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-void mlx5e_free_sq_descs(struct mlx5e_sq *sq);
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
+void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);

 void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
			bool recycle);
@@ -818,28 +853,40 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
				 u8 cq_period_mode);
 void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type);

-static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
-				      struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
+static inline
+struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
 {
-	u16 ofst = sq->bf_offset;
+	u16 pi = *pc & wq->sz_m1;
+	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+
+	memset(cseg, 0, sizeof(*cseg));
+
+	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
+	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
+
+	(*pc)++;
+
+	return wqe;
+}
+
+static inline
+void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
+		     void __iomem *uar_map,
+		     struct mlx5_wqe_ctrl_seg *ctrl)
+{
+	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

-	*sq->wq.db = cpu_to_be32(sq->pc);
+	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();
-	if (bf_sz)
-		__iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
-	else
-		mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
-	/* flush the write-combining mapped buffer */
-	wmb();

-	sq->bf_offset ^= sq->bf_buf_size;
+	mlx5_write64((__be32 *)ctrl, uar_map, NULL);
 }

 static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
......
@@ -107,10 +107,18 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
		goto err_dealloc_transport_domain;
	}

+	err = mlx5_alloc_bfreg(mdev, &res->bfreg, false, false);
+	if (err) {
+		mlx5_core_err(mdev, "alloc bfreg failed, %d\n", err);
+		goto err_destroy_mkey;
+	}
+
	INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);

	return 0;

+err_destroy_mkey:
+	mlx5_core_destroy_mkey(mdev, &res->mkey);
 err_dealloc_transport_domain:
	mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
 err_dealloc_pd:
@@ -122,6 +130,7 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
 {
	struct mlx5e_resources *res = &mdev->mlx5e_res;

+	mlx5_free_bfreg(mdev, &res->bfreg);
	mlx5_core_destroy_mkey(mdev, &res->mkey);
	mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
	mlx5_core_dealloc_pd(mdev, res->pdn);
......
@@ -331,7 +331,7 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev,
 static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
 {
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-	struct mlx5e_sq *sq = &rq->channel->icosq;
+	struct mlx5e_icosq *sq = &rq->channel->icosq;
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_umr_wqe *wqe;
	u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
@@ -341,7 +341,7 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
		sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
		sq->db.ico_wqe[pi].num_wqebbs = 1;
-		mlx5e_send_nop(sq, false);
+		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	}

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
@@ -353,7 +353,7 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
	sq->db.ico_wqe[pi].num_wqebbs = num_wqebbs;
	sq->pc += num_wqebbs;
-	mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
 }

 static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
@@ -637,37 +637,36 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
 }

-static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
+static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
 {
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_tx_wqe *wqe;
-	u16 pi = (sq->pc - MLX5E_XDP_TX_WQEBBS) & wq->sz_m1; /* last pi */
+	u16 pi = (sq->pc - 1) & wq->sz_m1; /* last pi */

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);

-	wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-	mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+	mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
 }

 static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
					struct mlx5e_dma_info *di,
					const struct xdp_buff *xdp)
 {
-	struct mlx5e_sq *sq = &rq->channel->xdp_sq;
+	struct mlx5e_xdpsq *sq = &rq->xdpsq;
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-	struct mlx5e_sq_wqe_info *wi = &sq->db.xdp.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg;
-	u8 ds_cnt = MLX5E_XDP_TX_DS_COUNT;

	ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
	dma_addr_t dma_addr = di->addr + data_offset;
	unsigned int dma_len = xdp->data_end - xdp->data;

+	prefetchw(wqe);
+
	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
		     MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) {
		rq->stats.xdp_drop++;
@@ -675,48 +674,42 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
		return false;
	}

-	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
-		if (sq->db.xdp.doorbell) {
+	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
+		if (sq->db.doorbell) {
			/* SQ is full, ring doorbell */
			mlx5e_xmit_xdp_doorbell(sq);
-			sq->db.xdp.doorbell = false;
+			sq->db.doorbell = false;
		}
		rq->stats.xdp_tx_full++;
		mlx5e_page_release(rq, di, true);
		return false;
	}

-	dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
-				   PCI_DMA_TODEVICE);
+	dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);

-	memset(wqe, 0, sizeof(*wqe));
+	cseg->fm_ce_se = 0;

	dseg = (struct mlx5_wqe_data_seg *)eseg + 1;
+
	/* copy the inline part if required */
	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
		eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
		dma_len  -= MLX5E_XDP_MIN_INLINE;
		dma_addr += MLX5E_XDP_MIN_INLINE;
-		ds_cnt   += MLX5E_XDP_IHS_DS_COUNT;
		dseg++;
	}

	/* write the dma part */
	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->byte_count = cpu_to_be32(dma_len);
-	dseg->lkey       = sq->mkey_be;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
-	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

-	sq->db.xdp.di[pi] = *di;
-	wi->opcode     = MLX5_OPCODE_SEND;
-	wi->num_wqebbs = MLX5E_XDP_TX_WQEBBS;
-	sq->pc += MLX5E_XDP_TX_WQEBBS;
+	sq->db.di[pi] = *di;
+	sq->pc++;

-	sq->db.xdp.doorbell = true;
+	sq->db.doorbell = true;
+
	rq->stats.xdp_tx++;
	return true;
 }
@@ -950,7 +943,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 {
	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
-	struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq;
+	struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
	int work_done = 0;

	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
@@ -977,9 +970,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
		rq->handle_rx_cqe(rq, cqe);
	}

-	if (xdp_sq->db.xdp.doorbell) {
-		mlx5e_xmit_xdp_doorbell(xdp_sq);
-		xdp_sq->db.xdp.doorbell = false;
+	if (xdpsq->db.doorbell) {
+		mlx5e_xmit_xdp_doorbell(xdpsq);
+		xdpsq->db.doorbell = false;
	}

	mlx5_cqwq_update_db_record(&cq->wq);
@@ -989,3 +982,74 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
	return work_done;
 }

+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
+{
+	struct mlx5e_xdpsq *sq;
+	struct mlx5e_rq *rq;
+	u16 sqcc;
+	int i;
+
+	sq = container_of(cq, struct mlx5e_xdpsq, cq);
+
+	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+		return false;
+
+	rq = container_of(sq, struct mlx5e_rq, xdpsq);
+
+	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+	 * otherwise a cq overrun may occur
+	 */
+	sqcc = sq->cc;
+
+	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+		struct mlx5_cqe64 *cqe;
+		u16 wqe_counter;
+		bool last_wqe;
+
+		cqe = mlx5e_get_cqe(cq);
+		if (!cqe)
+			break;
+
+		mlx5_cqwq_pop(&cq->wq);
+
+		wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+		do {
+			struct mlx5e_dma_info *di;
+			u16 ci;
+
+			last_wqe = (sqcc == wqe_counter);
+
+			ci = sqcc & sq->wq.sz_m1;
+			di = &sq->db.di[ci];
+
+			sqcc++;
+
+			/* Recycle RX page */
+			mlx5e_page_release(rq, di, true);
+		} while (!last_wqe);
+	}
+
+	mlx5_cqwq_update_db_record(&cq->wq);
+
+	/* ensure cq space is freed before enabling more cqes */
+	wmb();
+
+	sq->cc = sqcc;
+	return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
+
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
+{
+	struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq);
+	struct mlx5e_dma_info *di;
+	u16 ci;
+
+	while (sq->cc != sq->pc) {
+		ci = sq->cc & sq->wq.sz_m1;
+		di = &sq->db.di[ci];
+		sq->cc++;
+
+		mlx5e_page_release(rq, di, false);
+	}
+}
@@ -38,29 +38,6 @@
 #define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)

-void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
-{
-	struct mlx5_wq_cyc *wq = &sq->wq;
-	u16 pi = sq->pc & wq->sz_m1;
-	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
-
-	memset(cseg, 0, sizeof(*cseg));
-
-	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
-	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);
-
-	sq->pc++;
-	sq->stats.nop++;
-
-	if (notify_hw) {
-		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
-	}
-}
-
 static inline void mlx5e_tx_dma_unmap(struct device *pdev,
				      struct mlx5e_sq_dma *dma)
 {
@@ -76,25 +53,25 @@ static inline void mlx5e_tx_dma_unmap(struct device *pdev,
	}
 }

-static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
+static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
				  dma_addr_t addr,
				  u32 size,
				  enum mlx5e_dma_map_type map_type)
 {
	u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;

-	sq->db.txq.dma_fifo[i].addr = addr;
-	sq->db.txq.dma_fifo[i].size = size;
-	sq->db.txq.dma_fifo[i].type = map_type;
+	sq->db.dma_fifo[i].addr = addr;
+	sq->db.dma_fifo[i].size = size;
+	sq->db.dma_fifo[i].type = map_type;
	sq->dma_fifo_pc++;
 }

-static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
 {
-	return &sq->db.txq.dma_fifo[i & sq->dma_fifo_mask];
+	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
 }

-static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
 {
	int i;

@@ -175,25 +152,6 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
	}
 }

-static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
-					    struct sk_buff *skb, bool bf)
-{
-	/* Some NIC TX decisions, e.g loopback, are based on the packet
-	 * headers and occur before the data gather.
-	 * Therefore these headers must be copied into the WQE
-	 */
-	if (bf) {
-		u16 ihs = skb_headlen(skb);
-
-		if (skb_vlan_tag_present(skb))
-			ihs += VLAN_HLEN;
-
-		if (ihs <= sq->max_inline)
-			return skb_headlen(skb);
-	}
-
-	return mlx5e_calc_min_inline(sq->min_inline_mode, skb);
-}
-
 static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
					    unsigned int *skb_len,
					    unsigned int len)
@@ -218,13 +176,13 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
 }

-static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb)
 {
	struct mlx5_wq_cyc *wq = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-	struct mlx5e_tx_wqe_info *wi = &sq->db.txq.wqe_info[pi];
+	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
@@ -235,7 +193,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
	u8 opcode = MLX5_OPCODE_SEND;
	dma_addr_t dma_addr = 0;
	unsigned int num_bytes;
-	bool bf = false;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;
@@ -255,11 +212,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
	} else
		sq->stats.csum_none++;

-	if (sq->cc != sq->prev_cc) {
-		sq->prev_cc = sq->cc;
-		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
-	}
-
	if (skb_is_gso(skb)) {
		eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		opcode = MLX5_OPCODE_LSO;
@@ -277,10 +229,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
		sq->stats.packets += skb_shinfo(skb)->gso_segs;
		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
	} else {
-		bf = sq->bf_budget &&
-		     !skb->xmit_more &&
-		     !skb_shinfo(skb)->nr_frags;
-		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
+		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
		sq->stats.packets++;
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
	}
@@ -349,7 +298,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

-	sq->db.txq.skb[pi] = skb;
+	sq->db.skb[pi] = skb;

	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += wi->num_wqebbs;
@@ -359,31 +308,23 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

-	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
+	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
+					     MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	sq->stats.xmit_more += skb->xmit_more;
-	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
-		int bf_sz = 0;
-
-		if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
-			bf_sz = wi->num_wqebbs << 3;
-
-		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
-	}
+	if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
-		sq->db.txq.skb[pi] = NULL;
-		mlx5e_send_nop(sq, false);
+		sq->db.skb[pi] = NULL;
+		mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
+		sq->stats.nop++;
	}

-	if (bf)
-		sq->bf_budget--;
-
	return NETDEV_TX_OK;

 dma_unmap_wqe_err:
@@ -398,21 +339,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 {
	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];
+	struct mlx5e_txqsq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];

	return mlx5e_sq_xmit(sq, skb);
 }

 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
-	struct mlx5e_sq *sq;
+	struct mlx5e_txqsq *sq;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

-	sq = container_of(cq, struct mlx5e_sq, cq);
+	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;
@@ -450,8 +391,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
			last_wqe = (sqcc == wqe_counter);

			ci = sqcc & sq->wq.sz_m1;
-			skb = sq->db.txq.skb[ci];
-			wi = &sq->db.txq.wqe_info[ci];
+			skb = sq->db.skb[ci];
+			wi = &sq->db.wqe_info[ci];

			if (unlikely(!skb)) { /* nop */
				sqcc++;
@@ -492,7 +433,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
-	    mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
+	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.wake++;
	}
@@ -500,7 +441,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
	return (i == MLX5E_TX_CQ_POLL_BUDGET);
 }

-static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
+void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
 {
	struct mlx5e_tx_wqe_info *wi;
	struct sk_buff *skb;
@@ -509,8 +450,8 @@ static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)

	while (sq->cc != sq->pc) {
		ci = sq->cc & sq->wq.sz_m1;
-		skb = sq->db.txq.skb[ci];
-		wi = &sq->db.txq.wqe_info[ci];
+		skb = sq->db.skb[ci];
+		wi = &sq->db.wqe_info[ci];

		if (!skb) { /* nop */
			sq->cc++;
@@ -528,37 +469,3 @@ static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
		sq->cc += wi->num_wqebbs;
	}
 }
-
-static void mlx5e_free_xdp_sq_descs(struct mlx5e_sq *sq)
-{
-	struct mlx5e_sq_wqe_info *wi;
-	struct mlx5e_dma_info *di;
-	u16 ci;
-
-	while (sq->cc != sq->pc) {
-		ci = sq->cc & sq->wq.sz_m1;
-		di = &sq->db.xdp.di[ci];
-		wi = &sq->db.xdp.wqe_info[ci];
-
-		if (wi->opcode == MLX5_OPCODE_NOP) {
-			sq->cc++;
-			continue;
-		}
-
-		sq->cc += wi->num_wqebbs;
-
-		mlx5e_page_release(&sq->channel->rq, di, false);
-	}
-}
-
-void mlx5e_free_sq_descs(struct mlx5e_sq *sq)
-{
-	switch (sq->type) {
-	case MLX5E_SQ_TXQ:
-		mlx5e_free_txq_sq_descs(sq);
-		break;
-	case MLX5E_SQ_XDP:
-		mlx5e_free_xdp_sq_descs(sq);
-		break;
-	}
-}
@@ -44,14 +44,14 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
		return NULL;

	/* ensure cqe content is read after cqe ownership bit */
-	rmb();
+	dma_rmb();

	return cqe;
 }

 static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 {
-	struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
+	struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
	struct mlx5_wq_cyc *wq;
	struct mlx5_cqe64 *cqe;
	u16 sqcc;
@@ -105,66 +105,6 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
	sq->cc = sqcc;
 }

-static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
-{
-	struct mlx5e_sq *sq;
-	u16 sqcc;
-	int i;
-
-	sq = container_of(cq, struct mlx5e_sq, cq);
-
-	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
-		return false;
-
-	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
-	 * otherwise a cq overrun may occur
-	 */
-	sqcc = sq->cc;
-
-	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
-		struct mlx5_cqe64 *cqe;
-		u16 wqe_counter;
-		bool last_wqe;
-
-		cqe = mlx5e_get_cqe(cq);
-		if (!cqe)
-			break;
-
-		mlx5_cqwq_pop(&cq->wq);
-
-		wqe_counter = be16_to_cpu(cqe->wqe_counter);
-
-		do {
-			struct mlx5e_sq_wqe_info *wi;
-			struct mlx5e_dma_info *di;
-			u16 ci;
-
-			last_wqe = (sqcc == wqe_counter);
-
-			ci = sqcc & sq->wq.sz_m1;
-			di = &sq->db.xdp.di[ci];
-			wi = &sq->db.xdp.wqe_info[ci];
-
-			if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) {
-				sqcc++;
-				continue;
-			}
-
-			sqcc += wi->num_wqebbs;
-
-			/* Recycle RX page */
-			mlx5e_page_release(&sq->channel->rq, di, true);
-		} while (!last_wqe);
-	}
-
-	mlx5_cqwq_update_db_record(&cq->wq);
-
-	/* ensure cq space is freed before enabling more cqes */
-	wmb();
-
-	sq->cc = sqcc;
-	return (i == MLX5E_TX_CQ_POLL_BUDGET);
-}
-
 int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 {
	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
@@ -178,12 +118,12 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
	for (i = 0; i < c->num_tc; i++)
		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);

+	if (c->xdp)
+		busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
+
	work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
	busy |= work_done == budget;

-	if (c->xdp)
-		busy |= mlx5e_poll_xdp_tx_cq(&c->xdp_sq.cq);
-
	mlx5e_poll_ico_cq(&c->icosq.cq);

	busy |= mlx5e_post_rx_wqes(&c->rq);
......
@@ -728,6 +728,7 @@ struct mlx5e_resources {
	u32 pdn;
	struct mlx5_td td;
	struct mlx5_core_mkey mkey;
+	struct mlx5_sq_bfreg bfreg;
 };

 struct mlx5_core_dev {
......