Commit eba2db2b authored by Saeed Mahameed, committed by David S. Miller

net/mlx5e: Move mlx5e_rq struct declaration

Move struct mlx5e_rq and friends to appear after mlx5e_sq declaration in
en.h.

We will need this for the next patch, which moves the mlx5e_sq instance
into the mlx5e_rq struct for XDP SQs.
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 1c4bf940
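The ordering constraint behind this move comes from C itself: a struct may be
embedded by value only after its complete definition is visible, while a
pointer member needs just a forward declaration. A minimal standalone sketch
of that rule (illustrative names, not mlx5e code):

        struct sq;                      /* forward declaration: size unknown here */

        struct rq_with_pointer {
                struct sq *xdp_sq;      /* OK: pointer to an incomplete type */
        };

        struct sq {                     /* the complete definition must appear ... */
                unsigned short cc;      /* consumer counter */
                unsigned short pc;      /* producer counter */
        };

        struct rq_with_value {
                struct sq xdp_sq;       /* ... before the struct is embedded by value */
        };

Hence mlx5e_sq and its helper types must be declared before mlx5e_rq once the
next patch embeds an XDP SQ inside the RQ.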
drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -297,19 +297,113 @@ struct mlx5e_cq {
         struct mlx5_frag_wq_ctrl wq_ctrl;
 } ____cacheline_aligned_in_smp;
 
-struct mlx5e_rq;
-typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
-                                       struct mlx5_cqe64 *cqe);
+struct mlx5e_tx_wqe_info {
+        u32 num_bytes;
+        u8 num_wqebbs;
+        u8 num_dma;
+};
 
-typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
-                                  u16 ix);
+enum mlx5e_dma_map_type {
+        MLX5E_DMA_MAP_SINGLE,
+        MLX5E_DMA_MAP_PAGE
+};
+
+struct mlx5e_sq_dma {
+        dma_addr_t addr;
+        u32 size;
+        enum mlx5e_dma_map_type type;
+};
+
+enum {
+        MLX5E_SQ_STATE_ENABLED,
+};
+
+struct mlx5e_sq_wqe_info {
+        u8 opcode;
+        u8 num_wqebbs;
+};
 
-typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+enum mlx5e_sq_type {
+        MLX5E_SQ_TXQ,
+        MLX5E_SQ_ICO,
+        MLX5E_SQ_XDP
+};
+
+struct mlx5e_sq {
+        /* data path */
+
+        /* dirtied @completion */
+        u16 cc;
+        u32 dma_fifo_cc;
+
+        /* dirtied @xmit */
+        u16 pc ____cacheline_aligned_in_smp;
+        u32 dma_fifo_pc;
+        struct mlx5e_sq_stats stats;
+
+        struct mlx5e_cq cq;
+
+        /* pointers to per tx element info: write@xmit, read@completion */
+        union {
+                struct {
+                        struct sk_buff **skb;
+                        struct mlx5e_sq_dma *dma_fifo;
+                        struct mlx5e_tx_wqe_info *wqe_info;
+                } txq;
+                struct mlx5e_sq_wqe_info *ico_wqe;
+                struct {
+                        struct mlx5e_sq_wqe_info *wqe_info;
+                        struct mlx5e_dma_info *di;
+                        bool doorbell;
+                } xdp;
+        } db;
+
+        /* read only */
+        struct mlx5_wq_cyc wq;
+        u32 dma_fifo_mask;
+        void __iomem *uar_map;
+        struct netdev_queue *txq;
+        u32 sqn;
+        u16 max_inline;
+        u8 min_inline_mode;
+        u16 edge;
+        struct device *pdev;
+        struct mlx5e_tstamp *tstamp;
+        __be32 mkey_be;
+        unsigned long state;
+
+        /* control path */
+        struct mlx5_wq_ctrl wq_ctrl;
+        struct mlx5e_channel *channel;
+        int tc;
+        u32 rate_limit;
+        u8 type;
+} ____cacheline_aligned_in_smp;
+
+static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
+{
+        return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
+                (sq->cc == sq->pc));
+}
 
 struct mlx5e_dma_info {
         struct page *page;
         dma_addr_t addr;
 };
 
+struct mlx5e_umr_dma_info {
+        __be64 *mtt;
+        dma_addr_t mtt_addr;
+        struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
+        struct mlx5e_umr_wqe wqe;
+};
+
+struct mlx5e_mpw_info {
+        struct mlx5e_umr_dma_info umr;
+        u16 consumed_strides;
+        u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+};
+
 struct mlx5e_rx_am_stats {
         int ppms; /* packets per msec */
         int epms; /* events per msec */
@@ -346,6 +440,11 @@ struct mlx5e_page_cache {
         struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
 };
 
+struct mlx5e_rq;
+typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
+typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq*, struct mlx5e_rx_wqe*, u16);
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
+
 struct mlx5e_rq {
         /* data path */
         struct mlx5_wq_ll wq;
@@ -393,108 +492,6 @@ struct mlx5e_rq {
         struct mlx5_core_mkey umr_mkey;
 } ____cacheline_aligned_in_smp;
 
-struct mlx5e_umr_dma_info {
-        __be64 *mtt;
-        dma_addr_t mtt_addr;
-        struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
-        struct mlx5e_umr_wqe wqe;
-};
-
-struct mlx5e_mpw_info {
-        struct mlx5e_umr_dma_info umr;
-        u16 consumed_strides;
-        u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
-};
-
-struct mlx5e_tx_wqe_info {
-        u32 num_bytes;
-        u8 num_wqebbs;
-        u8 num_dma;
-};
-
-enum mlx5e_dma_map_type {
-        MLX5E_DMA_MAP_SINGLE,
-        MLX5E_DMA_MAP_PAGE
-};
-
-struct mlx5e_sq_dma {
-        dma_addr_t addr;
-        u32 size;
-        enum mlx5e_dma_map_type type;
-};
-
-enum {
-        MLX5E_SQ_STATE_ENABLED,
-};
-
-struct mlx5e_sq_wqe_info {
-        u8 opcode;
-        u8 num_wqebbs;
-};
-
-enum mlx5e_sq_type {
-        MLX5E_SQ_TXQ,
-        MLX5E_SQ_ICO,
-        MLX5E_SQ_XDP
-};
-
-struct mlx5e_sq {
-        /* data path */
-
-        /* dirtied @completion */
-        u16 cc;
-        u32 dma_fifo_cc;
-
-        /* dirtied @xmit */
-        u16 pc ____cacheline_aligned_in_smp;
-        u32 dma_fifo_pc;
-        struct mlx5e_sq_stats stats;
-
-        struct mlx5e_cq cq;
-
-        /* pointers to per tx element info: write@xmit, read@completion */
-        union {
-                struct {
-                        struct sk_buff **skb;
-                        struct mlx5e_sq_dma *dma_fifo;
-                        struct mlx5e_tx_wqe_info *wqe_info;
-                } txq;
-                struct mlx5e_sq_wqe_info *ico_wqe;
-                struct {
-                        struct mlx5e_sq_wqe_info *wqe_info;
-                        struct mlx5e_dma_info *di;
-                        bool doorbell;
-                } xdp;
-        } db;
-
-        /* read only */
-        struct mlx5_wq_cyc wq;
-        u32 dma_fifo_mask;
-        void __iomem *uar_map;
-        struct netdev_queue *txq;
-        u32 sqn;
-        u16 max_inline;
-        u8 min_inline_mode;
-        u16 edge;
-        struct device *pdev;
-        struct mlx5e_tstamp *tstamp;
-        __be32 mkey_be;
-        unsigned long state;
-
-        /* control path */
-        struct mlx5_wq_ctrl wq_ctrl;
-        struct mlx5e_channel *channel;
-        int tc;
-        u32 rate_limit;
-        u8 type;
-} ____cacheline_aligned_in_smp;
-
-static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
-{
-        return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
-                (sq->cc == sq->pc));
-}
-
 enum channel_flags {
         MLX5E_CHANNEL_NAPI_SCHED = 1,
 };
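A note on the moved mlx5e_sq_has_room_for() helper: cc and pc are free-running
16-bit consumer/producer counters over a power-of-two ring, and wq.sz_m1 is the
ring size minus one. (cc - pc) & sz_m1 equals the number of free slots modulo
the ring size, which is 0 both when the ring is full and when it is empty; the
extra cc == pc test disambiguates the empty case. A standalone sketch of that
arithmetic (not mlx5e code, assumptions as stated above):

        #include <assert.h>
        #include <stdbool.h>
        #include <stdint.h>

        /* sz_m1 is (ring size - 1); cc/pc wrap naturally in u16 arithmetic */
        static bool ring_has_room_for(uint16_t sz_m1, uint16_t cc, uint16_t pc,
                                      uint16_t n)
        {
                /* (cc - pc) & sz_m1 == (size - used) mod size == free slots,
                 * except it is 0 for an empty ring, hence the cc == pc test. */
                return ((uint16_t)(cc - pc) & sz_m1) >= n || cc == pc;
        }

        int main(void)
        {
                uint16_t sz_m1 = 7;                          /* ring of 8 entries */

                assert(ring_has_room_for(sz_m1, 0, 0, 8));   /* empty: full room  */
                assert(ring_has_room_for(sz_m1, 0, 5, 3));   /* 5 used, 3 free    */
                assert(!ring_has_room_for(sz_m1, 0, 5, 4));  /* only 3 free       */
                assert(!ring_has_room_for(sz_m1, 3, 11, 1)); /* 8 used: ring full */
                return 0;
        }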