Commit 76dc5a84 authored by Yishai Hadas, committed by Jason Gunthorpe

IB/mlx5: Manage device uid for DEVX white list commands

Manage a device uid for DEVX white list commands. The created device uid
will be used on white list commands if the user didn't supply its own uid.

This enables the firmware to filter out non-privileged functionality
based on its recognition of the uid.
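To make the intent concrete, here is a minimal sketch of the fallback a white
list command path could apply; devx_get_cmd_uid() is a hypothetical helper for
illustration only and is not part of this patch:

	/* Hypothetical illustration: pick the uid to stamp on a DEVX
	 * white list command.  Prefer the uid the user context created
	 * for itself; otherwise fall back to the device uid created by
	 * this patch, so the firmware can still recognize the command
	 * and filter out non-privileged functionality.
	 */
	static u16 devx_get_cmd_uid(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_ucontext *context)
	{
		if (context && context->devx_uid)
			return context->devx_uid;
		return dev->devx_whitelist_uid;
	}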
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Parent 7f72052c
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -45,13 +45,14 @@ static struct mlx5_ib_ucontext *devx_ufile2uctx(struct ib_uverbs_file *file)
 	return to_mucontext(ib_uverbs_get_ucontext(file));
 }
 
-int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev)
 {
 	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
 	u64 general_obj_types;
 	void *hdr;
 	int err;
+	u16 uid;
 
 	hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);
@@ -70,19 +71,18 @@ int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *contex
 	if (err)
 		return err;
 
-	context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-	return 0;
+	uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+	return uid;
 }
 
-void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
-			  struct mlx5_ib_ucontext *context)
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
 {
 	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
 
 	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);
-	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid);
+	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, uid);
 
 	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
 }
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1765,9 +1765,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 			goto out_uars;
 		}
 
-		err = mlx5_ib_devx_create(dev, context);
-		if (err)
+		err = mlx5_ib_devx_create(dev);
+		if (err < 0)
 			goto out_uars;
+		context->devx_uid = err;
 	}
 
 	err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
@@ -1870,7 +1871,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
 out_devx:
 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
-		mlx5_ib_devx_destroy(dev, context);
+		mlx5_ib_devx_destroy(dev, context->devx_uid);
 
 out_uars:
 	deallocate_uars(dev, context);
@@ -1904,7 +1905,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
 
 	if (context->devx_uid)
-		mlx5_ib_devx_destroy(dev, context);
+		mlx5_ib_devx_destroy(dev, context->devx_uid);
 
 	deallocate_uars(dev, context);
 	kfree(bfregi->sys_pages);
@@ -6189,6 +6190,8 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
 			profile->stage[stage].cleanup(dev);
 	}
 
+	if (dev->devx_whitelist_uid)
+		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
 	ib_dealloc_device((struct ib_device *)dev);
 }
@@ -6197,6 +6200,7 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
 {
 	int err;
 	int i;
+	int uid;
 
 	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
 		if (profile->stage[i].init) {
@@ -6206,6 +6210,10 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
 		}
 	}
 
+	uid = mlx5_ib_devx_create(dev);
+	if (uid > 0)
+		dev->devx_whitelist_uid = uid;
+
 	dev->profile = profile;
 	dev->ib_active = true;
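Note the calling convention the two call sites rely on: mlx5_ib_devx_create()
now returns the firmware-allocated uid (a positive value) on success and a
negative errno on failure, so the per-context path tests err < 0 while the
device path tests uid > 0. A uid of 0 means "no uid", which is exactly what
the if (context->devx_uid) and if (dev->devx_whitelist_uid) guards on the
destroy paths depend on.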
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -925,6 +925,7 @@ struct mlx5_ib_dev {
 	struct list_head	ib_dev_list;
 	u64			sys_image_guid;
 	struct mlx5_memic	memic;
+	u16			devx_whitelist_uid;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1249,10 +1250,8 @@ void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
 				  u8 port_num);
 
 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
-int mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
-			struct mlx5_ib_ucontext *context);
-void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
-			  struct mlx5_ib_ucontext *context);
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev);
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
 const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
 struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
 	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
@@ -1263,10 +1262,8 @@ int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
 void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
 #else
 static inline int
-mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
-		    struct mlx5_ib_ucontext *context) { return -EOPNOTSUPP; };
-static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
-					struct mlx5_ib_ucontext *context) {}
+mlx5_ib_devx_create(struct mlx5_ib_dev *dev) { return -EOPNOTSUPP; };
+static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
 static inline const struct uverbs_object_tree_def *
 mlx5_ib_get_devx_tree(void) { return NULL; }
 static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
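When CONFIG_INFINIBAND_USER_ACCESS is disabled, the mlx5_ib_devx_create() stub
returns -EOPNOTSUPP; the uid > 0 check in __mlx5_ib_add() then leaves
devx_whitelist_uid at its zeroed default, and the guard in __mlx5_ib_remove()
skips the destroy call, so the compiled-out case needs no further handling.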