Commit 0a9d74d1 authored by Michael S. Tsirkin

Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux into HEAD

Merge shared code that's necessary for mlx5 vdpa bits to build.

The branch itself includes a small number of patches and is also
independently merged by a couple of other trees.
That shouldn't cause any conflicts by itself.

Saeed Mahameed <saeedm@mellanox.com> says:

---

mlx5-next is a very small branch based on a very early rc that includes
mlx5 shared stuff between rdma and net-next, and now virtio as well.

---
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
@@ -299,11 +299,18 @@ void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm)
{
int i;
WARN_ON(perm & 0xfc);
for (i = 0; i < buf->npages; i++)
pas[i] = cpu_to_be64(buf->frags[i].map);
pas[i] = cpu_to_be64(buf->frags[i].map | perm);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array_perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
{
mlx5_fill_page_frag_array_perm(buf, pas, 0);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);
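The hunk above exports a permission-aware variant of the PAS fill helper. A minimal usage sketch follows (not part of this patch; it assumes a frag buffer already populated via the usual mlx5 buffer allocation path and uses the MLX5_MTT_PERM_RW flag added to mlx5_ifc.h later in this merge):

/* Usage sketch only: tag every MTT entry of "buf" as read/write while
 * filling the PAS array.  The permission value must fit in the low two
 * bits, as enforced by the WARN_ON(perm & 0xfc) above.
 */
static void example_fill_rw_pas(struct mlx5_frag_buf *buf, __be64 *pas)
{
	mlx5_fill_page_frag_array_perm(buf, pas, MLX5_MTT_PERM_RW);
}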
@@ -23,6 +23,9 @@ static const char *const mlx5_rsc_sgmt_name[] = {
MLX5_SGMT_STR_ASSING(SX_SLICE_ALL),
MLX5_SGMT_STR_ASSING(RDB),
MLX5_SGMT_STR_ASSING(RX_SLICE_ALL),
MLX5_SGMT_STR_ASSING(PRM_QUERY_QP),
MLX5_SGMT_STR_ASSING(PRM_QUERY_CQ),
MLX5_SGMT_STR_ASSING(PRM_QUERY_MKEY),
};
struct mlx5_rsc_dump {
@@ -130,11 +133,13 @@ struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev,
cmd->mem_size = key->size;
return cmd;
}
EXPORT_SYMBOL(mlx5_rsc_dump_cmd_create);
void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd)
{
kfree(cmd);
}
EXPORT_SYMBOL(mlx5_rsc_dump_cmd_destroy);
int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
struct page *page, int *size)
@@ -155,6 +160,7 @@ int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
return more_dump;
}
EXPORT_SYMBOL(mlx5_rsc_dump_next);
#define MLX5_RSC_DUMP_MENU_SEGMENT 0xffff
static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
@@ -4,41 +4,10 @@
#ifndef __MLX5_RSC_DUMP_H
#define __MLX5_RSC_DUMP_H
#include <linux/mlx5/rsc_dump.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
enum mlx5_sgmt_type {
MLX5_SGMT_TYPE_HW_CQPC,
MLX5_SGMT_TYPE_HW_SQPC,
MLX5_SGMT_TYPE_HW_RQPC,
MLX5_SGMT_TYPE_FULL_SRQC,
MLX5_SGMT_TYPE_FULL_CQC,
MLX5_SGMT_TYPE_FULL_EQC,
MLX5_SGMT_TYPE_FULL_QPC,
MLX5_SGMT_TYPE_SND_BUFF,
MLX5_SGMT_TYPE_RCV_BUFF,
MLX5_SGMT_TYPE_SRQ_BUFF,
MLX5_SGMT_TYPE_CQ_BUFF,
MLX5_SGMT_TYPE_EQ_BUFF,
MLX5_SGMT_TYPE_SX_SLICE,
MLX5_SGMT_TYPE_SX_SLICE_ALL,
MLX5_SGMT_TYPE_RDB,
MLX5_SGMT_TYPE_RX_SLICE_ALL,
MLX5_SGMT_TYPE_MENU,
MLX5_SGMT_TYPE_TERMINATE,
MLX5_SGMT_TYPE_NUM, /* Keep last */
};
struct mlx5_rsc_key {
enum mlx5_sgmt_type rsc;
int index1;
int index2;
int num_of_obj1;
int num_of_obj2;
int size;
};
#define MLX5_RSC_DUMP_ALL 0xFFFF
struct mlx5_rsc_dump_cmd;
struct mlx5_rsc_dump;
@@ -182,7 +182,7 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
{
return cseg && !!cseg->tisn;
return cseg && !!cseg->tis_tir_num;
}
static inline u8
@@ -19,7 +19,7 @@
#define MLX5E_KTLS_PROGRESS_WQE_SZ \
(offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \
MLX5_ST_SZ_BYTES(tls_progress_params))
sizeof(struct mlx5_wqe_tls_progress_params_seg))
#define MLX5E_KTLS_PROGRESS_WQEBBS \
(DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
@@ -64,7 +64,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
STATIC_PARAMS_DS_CNT);
cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
cseg->tis_tir_num = cpu_to_be32(priv_tx->tisn << 8);
ucseg->flags = MLX5_UMR_INLINE;
ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
@@ -75,10 +75,14 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
static void
fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn);
MLX5_SET(tls_progress_params, ctx, record_tracker_state,
struct mlx5_wqe_tls_progress_params_seg *params;
params = ctx;
params->tis_tir_num = cpu_to_be32(priv_tx->tisn);
MLX5_SET(tls_progress_params, params->ctx, record_tracker_state,
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
MLX5_SET(tls_progress_params, ctx, auth_state,
MLX5_SET(tls_progress_params, params->ctx, auth_state,
MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD);
}
@@ -284,7 +288,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
cseg->tisn = cpu_to_be32(tisn << 8);
cseg->tis_tir_num = cpu_to_be32(tisn << 8);
cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
fsz = skb_frag_size(frag);
@@ -305,7 +305,7 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
struct mlx5e_accel_tx_tls_state *state)
{
cseg->tisn = cpu_to_be32(state->tls_tisn << 8);
cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
}
static int tls_update_resync_sn(struct net_device *netdev,
@@ -44,16 +44,6 @@
#include "lib/mpfs.h"
#include "en/tc_ct.h"
#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)
/* The index of the last real chain (FT) + 1 as chain zero is valid as well */
#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)
#define FDB_TC_MAX_PRIO 16
#define FDB_TC_LEVELS_PER_PRIO 2
#ifdef CONFIG_MLX5_ESWITCH
#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15
@@ -41,7 +41,6 @@
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
#include "eswitch.h"
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node))
@@ -1581,6 +1580,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
static bool counter_is_valid(u32 action)
{
return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_ALLOW |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}
@@ -39,6 +39,16 @@
#include <linux/llist.h>
#include <steering/fs_dr.h>
#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)
/* The index of the last real chain (FT) + 1 as chain zero is valid as well */
#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)
#define FDB_TC_MAX_PRIO 16
#define FDB_TC_LEVELS_PER_PRIO 2
struct mlx5_modify_hdr {
enum mlx5_flow_namespace_type ns_type;
union {
@@ -226,13 +226,20 @@ void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
{
unsigned char *dev_addr = priv->netdev->dev_addr;
u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
struct mlx5i_priv *ipriv = priv->ppriv;
void *addr_path;
int qpn = 0;
int ret = 0;
void *qpc;
if (MLX5_CAP_GEN(priv->mdev, mkey_by_name)) {
qpn = (dev_addr[1] << 16) + (dev_addr[2] << 8) + dev_addr[3];
MLX5_SET(create_qp_in, in, input_qpn, qpn);
}
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
@@ -557,6 +557,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN_MAX(dev, release_all_pages))
MLX5_SET(cmd_hca_cap, set_hca_cap, release_all_pages, 1);
if (MLX5_CAP_GEN_MAX(dev, mkey_by_name))
MLX5_SET(cmd_hca_cap, set_hca_cap, mkey_by_name, 1);
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -33,7 +33,6 @@
#ifndef MLX5_CORE_CQ_H
#define MLX5_CORE_CQ_H
#include <rdma/ib_verbs.h>
#include <linux/mlx5/driver.h>
#include <linux/refcount.h>
@@ -458,6 +458,15 @@ enum {
MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2,
};
struct mlx5_wqe_tls_static_params_seg {
u8 ctx[MLX5_ST_SZ_BYTES(tls_static_params)];
};
struct mlx5_wqe_tls_progress_params_seg {
__be32 tis_tir_num;
u8 ctx[MLX5_ST_SZ_BYTES(tls_progress_params)];
};
enum {
MLX5_SET_PORT_RESET_QKEY = 0,
MLX5_SET_PORT_GUID0 = 16,
@@ -1352,11 +1361,11 @@ enum mlx5_qcam_feature_groups {
MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
MLX5_GET(device_virtio_emulation_cap, \
MLX5_GET(virtio_emulation_cap, \
(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
MLX5_GET64(device_virtio_emulation_cap, \
MLX5_GET64(virtio_emulation_cap, \
(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
#define MLX5_CAP_IPSEC(mdev, cap)\
@@ -972,6 +972,7 @@ void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn);
@@ -1054,6 +1055,7 @@ enum {
enum {
MLX5_INTERFACE_PROTOCOL_IB = 0,
MLX5_INTERFACE_PROTOCOL_ETH = 1,
MLX5_INTERFACE_PROTOCOL_VDPA = 2,
};
struct mlx5_interface {
@@ -93,6 +93,7 @@ enum {
enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
MLX5_OBJ_TYPE_PSV = 0xff03,
@@ -981,17 +982,40 @@ struct mlx5_ifc_device_event_cap_bits {
u8 user_unaffiliated_events[4][0x40];
};
struct mlx5_ifc_device_virtio_emulation_cap_bits {
u8 reserved_at_0[0x20];
struct mlx5_ifc_virtio_emulation_cap_bits {
u8 desc_tunnel_offload_type[0x1];
u8 eth_frame_offload_type[0x1];
u8 virtio_version_1_0[0x1];
u8 device_features_bits_mask[0xd];
u8 event_mode[0x8];
u8 virtio_queue_type[0x8];
u8 reserved_at_20[0x13];
u8 max_tunnel_desc[0x10];
u8 reserved_at_30[0x3];
u8 log_doorbell_stride[0x5];
u8 reserved_at_38[0x3];
u8 log_doorbell_bar_size[0x5];
u8 doorbell_bar_offset[0x40];
u8 reserved_at_80[0x780];
u8 max_emulated_devices[0x8];
u8 max_num_virtio_queues[0x18];
u8 reserved_at_a0[0x60];
u8 umem_1_buffer_param_a[0x20];
u8 umem_1_buffer_param_b[0x20];
u8 umem_2_buffer_param_a[0x20];
u8 umem_2_buffer_param_b[0x20];
u8 umem_3_buffer_param_a[0x20];
u8 umem_3_buffer_param_b[0x20];
u8 reserved_at_1c0[0x640];
};
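Taken together with the renamed MLX5_CAP_DEV_VDPA_EMULATION()/MLX5_CAP64_DEV_VDPA_EMULATION() accessors earlier in this diff, a consumer such as the upcoming mlx5 vdpa driver could probe these capabilities roughly as follows. This is a hedged sketch, not code from this merge; it assumes the VDPA emulation caps have already been queried into mdev->caps.hca_cur[MLX5_CAP_VDPA_EMULATION]:

/* Sketch: check whether the device can emulate virtio-net queues.
 * virtio_net_device_emualtion_manager (spelling as in cmd_hca_cap below)
 * gates the feature; max_num_virtio_queues comes from the cap struct above.
 */
static bool example_vdpa_emulation_supported(struct mlx5_core_dev *mdev)
{
	u32 max_vqs;

	if (!MLX5_CAP_GEN(mdev, virtio_net_device_emualtion_manager))
		return false;

	max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
	return max_vqs != 0;
}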
enum {
@@ -1216,7 +1240,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8];
u8 reserved_at_d0[0xb];
u8 relaxed_ordering_write_umr[0x1];
u8 relaxed_ordering_read_umr[0x1];
u8 reserved_at_d2[0x7];
u8 virtio_net_device_emualtion_manager[0x1];
u8 virtio_blk_device_emualtion_manager[0x1];
u8 log_max_cq[0x5];
u8 log_max_eq_sz[0x8];
@@ -1392,7 +1420,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 bf[0x1];
u8 driver_version[0x1];
u8 pad_tx_eth_packet[0x1];
u8 reserved_at_263[0x8];
u8 reserved_at_263[0x3];
u8 mkey_by_name[0x1];
u8 reserved_at_267[0x4];
u8 log_bf_reg_size[0x5];
u8 reserved_at_270[0x8];
@@ -2949,7 +2980,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_fpga_cap_bits fpga_cap;
struct mlx5_ifc_tls_cap_bits tls_cap;
struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
struct mlx5_ifc_device_virtio_emulation_cap_bits virtio_emulation_cap;
struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
u8 reserved_at_0[0x8000];
};
@@ -3295,15 +3326,18 @@ struct mlx5_ifc_scheduling_context_bits {
};
struct mlx5_ifc_rqtc_bits {
u8 reserved_at_0[0xa0];
u8 reserved_at_0[0xa0];
u8 reserved_at_a0[0x10];
u8 rqt_max_size[0x10];
u8 reserved_at_a0[0x5];
u8 list_q_type[0x3];
u8 reserved_at_a8[0x8];
u8 rqt_max_size[0x10];
u8 reserved_at_c0[0x10];
u8 rqt_actual_size[0x10];
u8 rq_vhca_id_format[0x1];
u8 reserved_at_c1[0xf];
u8 rqt_actual_size[0x10];
u8 reserved_at_e0[0x6a0];
u8 reserved_at_e0[0x6a0];
struct mlx5_ifc_rq_num_bits rq_num[];
};
@@ -7084,7 +7118,7 @@ struct mlx5_ifc_destroy_mkey_out_bits {
struct mlx5_ifc_destroy_mkey_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7715,8 +7749,10 @@ struct mlx5_ifc_create_qp_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x40];
u8 reserved_at_40[0x8];
u8 input_qpn[0x18];
u8 reserved_at_60[0x20];
u8 opt_param_mask[0x20];
u8 ece[0x20];
@@ -7780,7 +7816,7 @@ struct mlx5_ifc_create_mkey_out_bits {
struct mlx5_ifc_create_mkey_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -10338,6 +10374,40 @@ struct mlx5_ifc_create_umem_in_bits {
struct mlx5_ifc_umem_bits umem;
};
struct mlx5_ifc_create_umem_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x8];
u8 umem_id[0x18];
u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_umem_in_bits {
u8 opcode[0x10];
u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x8];
u8 umem_id[0x18];
u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_umem_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_create_uctx_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -10350,6 +10420,18 @@ struct mlx5_ifc_create_uctx_in_bits {
struct mlx5_ifc_uctx_bits uctx;
};
struct mlx5_ifc_create_uctx_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x10];
u8 uid[0x10];
u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_uctx_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -10363,6 +10445,15 @@ struct mlx5_ifc_destroy_uctx_in_bits {
u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_uctx_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_create_sw_icm_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_sw_icm_bits sw_icm;
@@ -10669,17 +10760,20 @@ struct mlx5_ifc_tls_static_params_bits {
};
struct mlx5_ifc_tls_progress_params_bits {
u8 reserved_at_0[0x8];
u8 tisn[0x18];
u8 next_record_tcp_sn[0x20];
u8 hw_resync_tcp_sn[0x20];
u8 record_tracker_state[0x2];
u8 auth_state[0x2];
u8 reserved_at_64[0x4];
u8 reserved_at_44[0x4];
u8 hw_offset_record_number[0x18];
};
enum {
MLX5_MTT_PERM_READ = 1 << 0,
MLX5_MTT_PERM_WRITE = 1 << 1,
MLX5_MTT_PERM_RW = MLX5_MTT_PERM_READ | MLX5_MTT_PERM_WRITE,
};
#endif /* MLX5_IFC_H */
@@ -209,7 +209,7 @@ struct mlx5_wqe_ctrl_seg {
__be32 general_id;
__be32 imm;
__be32 umr_mkey;
__be32 tisn;
__be32 tis_tir_num;
};
};
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies inc. */
#include <linux/mlx5/driver.h>
#ifndef __MLX5_RSC_DUMP
#define __MLX5_RSC_DUMP
enum mlx5_sgmt_type {
MLX5_SGMT_TYPE_HW_CQPC,
MLX5_SGMT_TYPE_HW_SQPC,
MLX5_SGMT_TYPE_HW_RQPC,
MLX5_SGMT_TYPE_FULL_SRQC,
MLX5_SGMT_TYPE_FULL_CQC,
MLX5_SGMT_TYPE_FULL_EQC,
MLX5_SGMT_TYPE_FULL_QPC,
MLX5_SGMT_TYPE_SND_BUFF,
MLX5_SGMT_TYPE_RCV_BUFF,
MLX5_SGMT_TYPE_SRQ_BUFF,
MLX5_SGMT_TYPE_CQ_BUFF,
MLX5_SGMT_TYPE_EQ_BUFF,
MLX5_SGMT_TYPE_SX_SLICE,
MLX5_SGMT_TYPE_SX_SLICE_ALL,
MLX5_SGMT_TYPE_RDB,
MLX5_SGMT_TYPE_RX_SLICE_ALL,
MLX5_SGMT_TYPE_PRM_QUERY_QP,
MLX5_SGMT_TYPE_PRM_QUERY_CQ,
MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
MLX5_SGMT_TYPE_MENU,
MLX5_SGMT_TYPE_TERMINATE,
MLX5_SGMT_TYPE_NUM, /* Keep last */
};
struct mlx5_rsc_key {
enum mlx5_sgmt_type rsc;
int index1;
int index2;
int num_of_obj1;
int num_of_obj2;
int size;
};
struct mlx5_rsc_dump_cmd;
struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev,
struct mlx5_rsc_key *key);
void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd);
int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
struct page *page, int *size);
#endif /* __MLX5_RSC_DUMP */
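For reference, the resource-dump API exported above is meant to be used in a create / iterate / destroy pattern. Below is a hedged sketch, not part of this patch set; it assumes mlx5_rsc_dump_cmd_create() returns an ERR_PTR on failure and that mlx5_rsc_dump_next() returns a negative errno on error, 0 when the dump is complete and a positive value while more data is pending (matching the "return more_dump" seen in the .c hunk above):

/* Sketch: dump the PRM query output of one QP, one page at a time.
 * Treating key.index1 as the QP number is an assumption made for this
 * example; the helper names themselves come from the header above.
 */
static int example_dump_qp(struct mlx5_core_dev *dev, u32 qpn)
{
	struct mlx5_rsc_key key = {
		.rsc	= MLX5_SGMT_TYPE_PRM_QUERY_QP,
		.index1	= qpn,
	};
	struct mlx5_rsc_dump_cmd *cmd;
	struct page *page;
	int size, err;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	cmd = mlx5_rsc_dump_cmd_create(dev, &key);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_free;
	}

	do {
		err = mlx5_rsc_dump_next(dev, cmd, page, &size);
		if (err < 0)
			break;
		/* consume "size" bytes at page_address(page) here */
	} while (err > 0);

	mlx5_rsc_dump_cmd_destroy(cmd);
out_free:
	__free_page(page);
	return err < 0 ? err : 0;
}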