Commit 31391048 authored by Saeed Mahameed, committed by David S. Miller

net/mlx5e: Different SQ types

The different SQ types (tx, xdp, ico) have grown apart; separate them into
distinct structures and remove the unneeded parts from each one, to
simplify the data path and make better use of the data cache.

Remove the DB union from the SQ structures, since it is no longer needed
now that each SQ type has its own data structure.
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 33ad9711
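The shape of the refactoring is easiest to see outside the diff: one SQ
structure with a runtime type tag and a db union of per-type state becomes
three independent structures, each carrying only the fields its data path
touches. Below is a minimal standalone C sketch of the before/after
pattern; the names (generic_sq, txq_sq, and so on) are illustrative, not
the driver's.

#include <stdint.h>

/* Before: one struct serves every SQ flavor. Each instance carries a
 * type tag plus the full union, and handlers dispatch on sq->type. */
enum sq_type { SQ_TXQ, SQ_ICO, SQ_XDP };

struct generic_sq {
	uint8_t type;
	union {
		struct { void **skb; void *dma_fifo; void *wqe_info; } txq;
		struct { void *ico_wqe; } ico;
		struct { void *di; int doorbell; } xdp;
	} db;
};

/* After: one lean struct per flavor. No tag, no union, no runtime
 * dispatch; each type's hot fields pack into fewer cache lines. */
struct txq_sq { void **skb; void *dma_fifo; void *wqe_info; };
struct ico_sq { void *ico_wqe; };
struct xdp_sq { void *di; int doorbell; };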
@@ -319,13 +319,7 @@ struct mlx5e_sq_wqe_info {
u8 num_wqebbs;
};
enum mlx5e_sq_type {
MLX5E_SQ_TXQ,
MLX5E_SQ_ICO,
MLX5E_SQ_XDP
};
struct mlx5e_sq {
struct mlx5e_txqsq {
/* data path */
/* dirtied @completion */
@@ -339,18 +333,11 @@ struct mlx5e_sq {
struct mlx5e_cq cq;
/* pointers to per tx element info: write@xmit, read@completion */
union {
struct {
struct sk_buff **skb;
struct mlx5e_sq_dma *dma_fifo;
struct mlx5e_tx_wqe_info *wqe_info;
} txq;
struct mlx5e_sq_wqe_info *ico_wqe;
struct {
struct mlx5e_dma_info *di;
bool doorbell;
} xdp;
/* write@xmit, read@completion */
struct {
struct sk_buff **skb;
struct mlx5e_sq_dma *dma_fifo;
struct mlx5e_tx_wqe_info *wqe_info;
} db;
/* read only */
@@ -372,7 +359,67 @@ struct mlx5e_sq {
struct mlx5e_channel *channel;
int tc;
u32 rate_limit;
u8 type;
} ____cacheline_aligned_in_smp;
struct mlx5e_xdpsq {
/* data path */
/* dirtied @rx completion */
u16 cc;
u16 pc;
struct mlx5e_cq cq;
/* write@xmit, read@completion */
struct {
struct mlx5e_dma_info *di;
bool doorbell;
} db;
/* read only */
struct mlx5_wq_cyc wq;
void __iomem *uar_map;
u32 sqn;
struct device *pdev;
__be32 mkey_be;
u8 min_inline_mode;
unsigned long state;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;
struct mlx5e_icosq {
/* data path */
/* dirtied @completion */
u16 cc;
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
u32 dma_fifo_pc;
u16 prev_cc;
struct mlx5e_cq cq;
/* write@xmit, read@completion */
struct {
struct mlx5e_sq_wqe_info *ico_wqe;
} db;
/* read only */
struct mlx5_wq_cyc wq;
void __iomem *uar_map;
u32 sqn;
u16 edge;
struct device *pdev;
__be32 mkey_be;
unsigned long state;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;
static inline bool
@@ -477,7 +524,7 @@ struct mlx5e_rq {
/* XDP */
struct bpf_prog *xdp_prog;
struct mlx5e_sq xdpsq;
struct mlx5e_xdpsq xdpsq;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -497,8 +544,8 @@ enum channel_flags {
struct mlx5e_channel {
/* data path */
struct mlx5e_rq rq;
struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
struct mlx5e_sq icosq; /* internal control operations */
struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
struct mlx5e_icosq icosq; /* internal control operations */
bool xdp;
struct napi_struct napi;
struct device *pdev;
@@ -680,7 +727,7 @@ struct mlx5e_profile {
struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_sq **txq_to_sq_map;
struct mlx5e_txqsq **txq_to_sq_map;
int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
struct bpf_prog *xdp_prog;
/* priv data path fields - end */
@@ -731,8 +778,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_sq *sq);
void mlx5e_free_sq_descs(struct mlx5e_sq *sq);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
bool recycle);
......
@@ -331,7 +331,7 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev,
static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
struct mlx5e_sq *sq = &rq->channel->icosq;
struct mlx5e_icosq *sq = &rq->channel->icosq;
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *wqe;
u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
@@ -342,7 +342,6 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
sq->db.ico_wqe[pi].num_wqebbs = 1;
mlx5e_post_nop(wq, sq->sqn, &sq->pc);
sq->stats.nop++;
}
wqe = mlx5_wq_cyc_get_wqe(wq, pi);
@@ -638,7 +637,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_tx_wqe *wqe;
@@ -653,9 +652,9 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
struct mlx5e_dma_info *di,
const struct xdp_buff *xdp)
{
struct mlx5e_sq *sq = &rq->xdpsq;
struct mlx5e_xdpsq *sq = &rq->xdpsq;
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi = sq->pc & wq->sz_m1;
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
@@ -676,10 +675,10 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
}
if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
if (sq->db.xdp.doorbell) {
if (sq->db.doorbell) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
sq->db.xdp.doorbell = false;
sq->db.doorbell = false;
}
rq->stats.xdp_tx_full++;
mlx5e_page_release(rq, di, true);
@@ -707,10 +706,10 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
sq->db.xdp.di[pi] = *di;
sq->db.di[pi] = *di;
sq->pc++;
sq->db.xdp.doorbell = true;
sq->db.doorbell = true;
rq->stats.xdp_tx++;
return true;
}
@@ -944,7 +943,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
struct mlx5e_sq *xdpsq = &rq->xdpsq;
struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
int work_done = 0;
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
@@ -971,9 +970,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
rq->handle_rx_cqe(rq, cqe);
}
if (xdpsq->db.xdp.doorbell) {
if (xdpsq->db.doorbell) {
mlx5e_xmit_xdp_doorbell(xdpsq);
xdpsq->db.xdp.doorbell = false;
xdpsq->db.doorbell = false;
}
mlx5_cqwq_update_db_record(&cq->wq);
@@ -986,12 +985,12 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
{
struct mlx5e_sq *sq;
struct mlx5e_xdpsq *sq;
struct mlx5e_rq *rq;
u16 sqcc;
int i;
sq = container_of(cq, struct mlx5e_sq, cq);
sq = container_of(cq, struct mlx5e_xdpsq, cq);
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
@@ -1023,7 +1022,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
last_wqe = (sqcc == wqe_counter);
ci = sqcc & sq->wq.sz_m1;
di = &sq->db.xdp.di[ci];
di = &sq->db.di[ci];
sqcc++;
/* Recycle RX page */
@@ -1040,7 +1039,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
void mlx5e_free_xdpsq_descs(struct mlx5e_sq *sq)
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
{
struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq);
struct mlx5e_dma_info *di;
@@ -1048,7 +1047,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_sq *sq)
while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
di = &sq->db.xdp.di[ci];
di = &sq->db.di[ci];
sq->cc++;
mlx5e_page_release(rq, di, false);
......
@@ -53,25 +53,25 @@ static inline void mlx5e_tx_dma_unmap(struct device *pdev,
}
}
static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
dma_addr_t addr,
u32 size,
enum mlx5e_dma_map_type map_type)
{
u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
sq->db.txq.dma_fifo[i].addr = addr;
sq->db.txq.dma_fifo[i].size = size;
sq->db.txq.dma_fifo[i].type = map_type;
sq->db.dma_fifo[i].addr = addr;
sq->db.dma_fifo[i].size = size;
sq->db.dma_fifo[i].type = map_type;
sq->dma_fifo_pc++;
}
static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
return &sq->db.txq.dma_fifo[i & sq->dma_fifo_mask];
return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
int i;
@@ -176,13 +176,13 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi = sq->pc & wq->sz_m1;
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
struct mlx5e_tx_wqe_info *wi = &sq->db.txq.wqe_info[pi];
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
@@ -298,7 +298,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
sq->db.txq.skb[pi] = skb;
sq->db.skb[pi] = skb;
wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
sq->pc += wi->num_wqebbs;
@@ -320,7 +320,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
/* fill sq edge with nops to avoid wqe wrap around */
while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
sq->db.txq.skb[pi] = NULL;
sq->db.skb[pi] = NULL;
mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
sq->stats.nop++;
}
@@ -339,21 +339,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];
struct mlx5e_txqsq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];
return mlx5e_sq_xmit(sq, skb);
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq *sq;
struct mlx5e_txqsq *sq;
u32 dma_fifo_cc;
u32 nbytes;
u16 npkts;
u16 sqcc;
int i;
sq = container_of(cq, struct mlx5e_sq, cq);
sq = container_of(cq, struct mlx5e_txqsq, cq);
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
@@ -391,8 +391,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
last_wqe = (sqcc == wqe_counter);
ci = sqcc & sq->wq.sz_m1;
skb = sq->db.txq.skb[ci];
wi = &sq->db.txq.wqe_info[ci];
skb = sq->db.skb[ci];
wi = &sq->db.wqe_info[ci];
if (unlikely(!skb)) { /* nop */
sqcc++;
@@ -441,7 +441,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
@@ -450,8 +450,8 @@ static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
skb = sq->db.txq.skb[ci];
wi = &sq->db.txq.wqe_info[ci];
skb = sq->db.skb[ci];
wi = &sq->db.wqe_info[ci];
if (!skb) { /* nop */
sq->cc++;
@@ -469,15 +469,3 @@ static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
sq->cc += wi->num_wqebbs;
}
}
void mlx5e_free_sq_descs(struct mlx5e_sq *sq)
{
switch (sq->type) {
case MLX5E_SQ_TXQ:
mlx5e_free_txq_sq_descs(sq);
break;
case MLX5E_SQ_XDP:
mlx5e_free_xdpsq_descs(sq);
break;
}
}
@@ -51,7 +51,7 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
struct mlx5_wq_cyc *wq;
struct mlx5_cqe64 *cqe;
u16 sqcc;
......
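With mlx5e_free_sq_descs() deleted above, the last runtime dispatch on
sq->type is gone; the close paths now call the type-specific helpers
directly. A hedged sketch of the resulting caller shape follows; the
function names are illustrative, and the real call sites live in parts of
the driver not shown in this excerpt.

/* Assumes the mlx5e_txqsq/mlx5e_xdpsq declarations from the header
 * hunks above; illustrative wrappers, not the driver's functions. */
static void example_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_descs(sq);	/* was: mlx5e_free_sq_descs(sq) */
}

static void example_deactivate_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_descs(sq);	/* was: mlx5e_free_sq_descs(sq) */
}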