// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include <linux/xarray.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

static void dispatch_event_fd(struct list_head *fd_list, const void *data);

enum devx_obj_flags {
	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
	DEVX_OBJ_FLAGS_DCT = 1 << 1,
	DEVX_OBJ_FLAGS_CQ = 1 << 2,
};

struct devx_async_data {
	struct mlx5_ib_dev *mdev;
	struct list_head list;
	struct ib_uobject *fd_uobj;
	struct mlx5_async_work cb_work;
	u16 cmd_out_len;
	/* must be last field in this structure */
	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};

struct devx_async_event_data {
	struct list_head list; /* headed in ev_file->event_list */
	struct mlx5_ib_uapi_devx_async_event_hdr hdr;
};

/* first level XA value data structure */
struct devx_event {
	struct xarray object_ids; /* second XA level, Key = object id */
	struct list_head unaffiliated_list;
};

/* second level XA value data structure */
struct devx_obj_event {
	struct rcu_head rcu;
	struct list_head obj_sub_list;
};
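
/*
 * Keying note (a summary of how the code below uses these structures): the
 * first-level xarray (dev->devx_event_table.event_xa) is keyed by the event
 * type number and holds a devx_event; for affiliated events the second-level
 * xarray (object_ids) is keyed by the object id and holds a devx_obj_event,
 * while unaffiliated subscriptions are linked on unaffiliated_list (see
 * devx_cq_comp() and devx_cleanup_subscription() below).
 */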

struct devx_event_subscription {
	struct list_head file_list; /* headed in ev_file->
				     * subscribed_events_list
				     */
	struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
				   * devx_obj_event->obj_sub_list
				   */
	struct list_head obj_list; /* headed in devx_object */
	struct list_head event_list; /* headed in ev_file->event_list or in
				      * temp list via subscription
				      */

	u8 is_cleaned:1;
	u32 xa_key_level1;
	u32 xa_key_level2;
	struct rcu_head	rcu;
	u64 cookie;
	struct devx_async_event_file *ev_file;
	struct file *filp; /* kept so we have direct access to it upon hot unplug */
	struct eventfd_ctx *eventfd;
};

struct devx_async_event_file {
	struct ib_uobject uobj;
	/* Head of events that are subscribed to this FD */
	struct list_head subscribed_events_list;
	spinlock_t lock;
	wait_queue_head_t poll_wait;
	struct list_head event_list;
	struct mlx5_ib_dev *dev;
	u8 omit_data:1;
	u8 is_overflow_err:1;
	u8 is_destroyed:1;
};

#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj {
	struct mlx5_ib_dev	*ib_dev;
	u64			obj_id;
	u32			dinlen; /* destroy inbox length */
	u32			dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
	u32			flags;
	union {
		struct mlx5_ib_devx_mr	devx_mr;
		struct mlx5_core_dct	core_dct;
		struct mlx5_core_cq	core_cq;
	};
	struct list_head event_sub; /* holds devx_event_subscription entries */
};

struct devx_umem {
	struct mlx5_core_dev		*mdev;
	struct ib_umem			*umem;
	u32				page_offset;
	int				page_shift;
	int				ncont;
	u32				dinlen;
	u32				dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};

struct devx_umem_reg_cmd {
	void				*in;
	u32				inlen;
	u32				out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};

static struct mlx5_ib_ucontext *
devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
{
	return to_mucontext(ib_uverbs_get_ucontext(attrs));
}

int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *uctx;
	int err;
	u16 uid;
	u32 cap = 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
		return -EINVAL;

	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
	if (is_user && capable(CAP_NET_RAW) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
		cap |= MLX5_UCTX_CAP_RAW_TX;
	if (is_user && capable(CAP_SYS_RAWIO) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
	     MLX5_UCTX_CAP_INTERNAL_DEV_RES))
		cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(uctx, uctx, cap, cap);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return uid;
}

void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_DESTROY_TIR:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		*dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
				    obj_id);
		return true;

	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		*dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
				    table_id);
		return true;
	default:
		return false;
	}
}

bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
		*counter_id = MLX5_GET(dealloc_flow_counter_in,
				       devx_obj->dinbox,
				       flow_counter_id);
		return true;
	}

	return false;
}

static bool is_legacy_unaffiliated_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return true;
	default:
		return false;
	}
}

static bool is_legacy_obj_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_COMP:
		return true;
	default:
		return false;
	}
}

static u16 get_legacy_obj_type(u16 opcode)
{
	switch (opcode) {
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_EVENT_QUEUE_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_EVENT_QUEUE_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_EVENT_QUEUE_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return 0;
	}
}

static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
{
	u16 opcode;

	opcode = (obj->obj_id >> 32) & 0xffff;

	if (is_legacy_obj_event_num(event_num))
		return get_legacy_obj_type(opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		return (obj->obj_id >> 48);
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_OBJ_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_OBJ_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_OBJ_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_OBJ_TYPE_DCT;
	case MLX5_CMD_OP_CREATE_TIR:
		return MLX5_OBJ_TYPE_TIR;
	case MLX5_CMD_OP_CREATE_TIS:
		return MLX5_OBJ_TYPE_TIS;
	case MLX5_CMD_OP_CREATE_PSV:
		return MLX5_OBJ_TYPE_PSV;
	case MLX5_OBJ_TYPE_MKEY:
		return MLX5_OBJ_TYPE_MKEY;
	case MLX5_CMD_OP_CREATE_RMP:
		return MLX5_OBJ_TYPE_RMP;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return MLX5_OBJ_TYPE_XRC_SRQ;
	case MLX5_CMD_OP_CREATE_XRQ:
		return MLX5_OBJ_TYPE_XRQ;
	case MLX5_CMD_OP_CREATE_RQT:
		return MLX5_OBJ_TYPE_RQT;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		return MLX5_OBJ_TYPE_FLOW_COUNTER;
	case MLX5_CMD_OP_CREATE_CQ:
		return MLX5_OBJ_TYPE_CQ;
	default:
		return 0;
	}
}

static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
{
	switch (event_type) {
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return eqe->data.qp_srq.type;
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return 0;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
	}
}

static u32 get_dec_obj_id(u64 obj_id)
{
	return (obj_id & 0xffffffff);
}

/*
 * As the obj_id in the firmware is not globally unique the object type
 * must be considered upon checking for a valid object id.
 * For that the opcode of the creator command is encoded as part of the obj_id.
 */
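/*
 * A sketch of the resulting layout, as composed by get_enc_obj_id() below:
 * bits [31:0] carry the firmware object id and the upper bits carry the
 * creator opcode; for general objects the obj_type is folded in as
 * "opcode | obj_type << 16", so it lands in bits [63:48] (see
 * get_dec_obj_type()).
 */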
static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
{
	return ((u64)opcode << 32) | obj_id;
}

static u64 devx_get_obj_id(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u64 obj_id;

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_type) << 16,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
					MLX5_GET(query_mkey_in, in,
						 mkey_index));
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(query_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(modify_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(query_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(modify_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(query_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(modify_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(query_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(modify_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(query_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(modify_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(query_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(modify_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(query_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(modify_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(query_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(modify_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
					MLX5_GET(query_flow_group_in, in,
						 group_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(query_fte_in, in,
						 flow_index));
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(set_fte_in, in, flow_index));
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
					MLX5_GET(query_q_counter_in, in,
						 counter_set_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
					MLX5_GET(query_flow_counter_in, in,
						 flow_counter_id));
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(query_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(modify_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
					MLX5_GET(add_vxlan_udp_dport_in, in,
						 vxlan_udp_port));
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(query_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(set_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(query_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rst2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2rtr_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rtr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rts2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(sqerr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2err_in, in, qpn));
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2rst_in, in, qpn));
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(query_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(query_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(query_xrc_srq_in, in,
						 xrc_srqn));
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
					MLX5_GET(query_srq_in, in, srqn));
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(arm_rq_in, in, srq_number));
		break;
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(drain_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(arm_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		obj_id = get_enc_obj_id
				(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
				 MLX5_GET(query_packet_reformat_context_in,
					  in, packet_reformat_id));
		break;
	default:
		obj_id = 0;
	}

	return obj_id;
}

static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
				 struct ib_uobject *uobj, const void *in)
{
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	u64 obj_id = devx_get_obj_id(in);

	if (!obj_id)
		return false;

	switch (uobj_get_object_id(uobj)) {
	case UVERBS_OBJECT_CQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
				      to_mcq(uobj->object)->mcq.cqn) ==
				      obj_id;

	case UVERBS_OBJECT_SRQ:
	{
		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
		u16 opcode;

		switch (srq->common.res) {
		case MLX5_RES_XSRQ:
			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
			break;
		case MLX5_RES_XRQ:
			opcode = MLX5_CMD_OP_CREATE_XRQ;
			break;
		default:
			if (!dev->mdev->issi)
				opcode = MLX5_CMD_OP_CREATE_SRQ;
			else
				opcode = MLX5_CMD_OP_CREATE_RMP;
		}

		return get_enc_obj_id(opcode,
				      to_msrq(uobj->object)->msrq.srqn) ==
				      obj_id;
	}

	case UVERBS_OBJECT_QP:
	{
		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
		enum ib_qp_type	qp_type = qp->ibqp.qp_type;

		if (qp_type == IB_QPT_RAW_PACKET ||
		    (qp->flags & MLX5_IB_QP_UNDERLAY)) {
			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
							 &qp->raw_packet_qp;
			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;

			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					       rq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					       sq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					       rq->tirn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					       sq->tisn) == obj_id);
		}

		if (qp_type == MLX5_IB_QPT_DCT)
			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					      qp->dct.mdct.mqp.qpn) == obj_id;

		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
				      qp->ibqp.qp_num) == obj_id;
	}

	case UVERBS_OBJECT_WQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
				      to_mrwq(uobj->object)->core_qp.qpn) ==
				      obj_id;

	case UVERBS_OBJECT_RWQ_IND_TBL:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
				      to_mrwq_ind_table(uobj->object)->rqtn) ==
				      obj_id;

	case MLX5_IB_OBJECT_DEVX_OBJ:
		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;

	default:
		return false;
	}
}

static void devx_set_umem_valid(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
	{
		void *cqc;

		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_QP:
	{
		void *qpc;

		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_RQ:
	{
		void *rqc, *wq;

		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
		wq  = MLX5_ADDR_OF(rqc, rqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_SQ:
	{
		void *sqc, *wq;

		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
		wq = MLX5_ADDR_OF(sqc, sqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_MODIFY_CQ:
		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
		break;

	case MLX5_CMD_OP_CREATE_RMP:
	{
		void *rmpc, *wq;

		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRQ:
	{
		void *xrqc, *wq;

		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	{
		void *xrc_srqc;

		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
					xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
		break;
	}

	default:
		return;
	}
}

static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{
	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (*opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		return true;
	default:
		return false;
	}
}

static bool devx_is_whitelist_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
		return true;
	default:
		return false;
	}
}

static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{
	if (devx_is_whitelist_cmd(cmd_in)) {
		struct mlx5_ib_dev *dev;

		if (c->devx_uid)
			return c->devx_uid;

		dev = to_mdev(c->ibucontext.device);
		if (dev->devx_whitelist_uid)
			return dev->devx_whitelist_uid;

		return -EOPNOTSUPP;
	}

	if (!c->devx_uid)
		return -EINVAL;

	return c->devx_uid;
}

static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	/* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
	     MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
	    (opcode >= MLX5_CMD_OP_GENERAL_START &&
	     opcode < MLX5_CMD_OP_GENERAL_END))
		return true;

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
		return true;
	default:
		return false;
	}
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	unsigned int irqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}

/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rang, and
 * validates that the UAR through which the DB was rang matches the UAR ID
 * of the object.
 * If no match the doorbell is silently ignored by the hardware. Of course,
 * the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than his, and then in this case other users
 * may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it to the hardware schedule
 * queue or arm its CQ for event generation), no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err;
	int uid;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	/* Only a whitelist of general HCA commands is allowed for this method. */
	if (!devx_is_general_cmd(cmd_in, dev))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
			      cmd_out_len);
}

static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;

	case MLX5_CMD_OP_CREATE_UMEM:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_UMEM);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in,  in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in,  in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in,  in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in,  in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	default:
		/* The entry must match to one of the devx_is_obj_create_cmd */
		WARN_ON(true);
		break;
	}
}

static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
	struct mlx5_core_mkey *mkey;
	void *mkc;
	u8 key;

	mkey = &devx_mr->mmkey;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);

	return xa_err(xa_store(&dev->mdev->priv.mkey_table,
			       mlx5_base_mkey(mkey->key), mkey, GFP_KERNEL));
}

static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
				   struct devx_obj *obj,
				   void *in, int in_len)
{
	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
			MLX5_FLD_SZ_BYTES(create_mkey_in,
			memory_key_mkey_entry);
	void *mkc;
	u8 access_mode;

	if (in_len < min_len)
		return -EINVAL;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;

	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
		access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
		return 0;
	}

	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
	return 0;
}

static void devx_free_indirect_mkey(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct devx_obj, devx_mr.rcu));
}

/* This function, which deletes the mkey from the radix tree, needs to be
 * called before destroying the underlying mkey. Otherwise a race might
 * occur: another thread could get the same mkey before this one is deleted
 * and would then fail when inserting its own data into the tree.
 *
 * Note:
 * An error in the destroy is not expected unless there is some other indirect
 * mkey which points to this one. In a kernel cleanup flow it will be just
 * destroyed in the iterative destruction call. In a user flow, in case
 * the application didn't close in the expected order it's its own problem,
 * the mkey won't be part of the tree, in both cases the kernel is safe.
 */
static void devx_cleanup_mkey(struct devx_obj *obj)
{
	xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
		 mlx5_base_mkey(obj->devx_mr.mmkey.key));
}

static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
				      struct devx_event_subscription *sub)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	if (sub->is_cleaned)
		return;

	sub->is_cleaned = 1;
	list_del_rcu(&sub->xa_list);

	if (list_empty(&sub->obj_list))
		return;

	list_del_rcu(&sub->obj_list);
	/* check whether key level 1 for this obj_sub_list is empty */
	event = xa_load(&dev->devx_event_table.event_xa,
			sub->xa_key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 sub->xa_key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct mlx5_devx_event_table *devx_event_table;
	struct devx_obj *obj = uobject->object;
	struct devx_event_subscription *sub_entry, *tmp;
	struct mlx5_ib_dev *dev;
	int ret;

	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
		devx_cleanup_mkey(obj);

	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
				    obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	devx_event_table = &dev->devx_event_table;

	mutex_lock(&devx_event_table->event_xa_lock);
	list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
		devx_cleanup_subscription(dev, sub_entry);
	mutex_unlock(&devx_event_table->event_xa_lock);

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		call_srcu(&dev->mr_srcu, &obj->devx_mr.rcu,
			  devx_free_indirect_mkey);
		return ret;
	}

	kfree(obj);
	return ret;
}

static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
	struct mlx5_devx_event_table *table;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u32 obj_id = mcq->cqn;

	table = &obj->ib_dev->devx_event_table;
	rcu_read_lock();
	event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
	if (!event)
		goto out;

	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event)
		goto out;

	dispatch_event_fd(&obj_event->obj_sub_list, eqe);
out:
	rcu_read_unlock();
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len =  uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	int cmd_in_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	u16 obj_type = 0;
	int err;
	int uid;
	u32 obj_id;
	u16 opcode;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
		if (err)
			goto obj_free;
	} else {
		devx_set_umem_valid(cmd_in);
	}

	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
		obj->flags |= DEVX_OBJ_FLAGS_DCT;
		err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
					   cmd_in, cmd_in_len,
					   cmd_out, cmd_out_len);
	} else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
		obj->flags |= DEVX_OBJ_FLAGS_CQ;
		obj->core_cq.comp = devx_cq_comp;
		err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
					  cmd_in, cmd_in_len, cmd_out,
					  cmd_out_len);
	} else {
		err = mlx5_cmd_exec(dev->mdev, cmd_in,
				    cmd_in_len,
				    cmd_out, cmd_out_len);
	}

	if (err)
		goto obj_free;

	uobj->object = obj;
	INIT_LIST_HEAD(&obj->event_sub);
	obj->ib_dev = dev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
		if (err)
			goto obj_destroy;
	}

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto err_copy;

	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);

	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);

	return 0;

err_copy:
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
		devx_cleanup_mkey(obj);
obj_destroy:
	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
			      sizeof(out));
obj_free:
	kfree(obj);
	return err;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	void *cmd_out;
	int err;
	int uid;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	devx_set_umem_valid(cmd_in);

	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	void *cmd_out;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

struct devx_async_event_queue {
	spinlock_t		lock;
	wait_queue_head_t	poll_wait;
	struct list_head	event_list;
	atomic_t		bytes_in_use;
	u8			is_destroyed:1;
};

struct devx_async_cmd_event_file {
	struct ib_uobject		uobj;
	struct devx_async_event_queue	ev_queue;
	struct mlx5_async_ctx		async_ctx;
};

static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	atomic_set(&ev_queue->bytes_in_use, 0);
	ev_queue->is_destroyed = 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_async_cmd_event_file *ev_file;

	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);

	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
			       uobj);
	devx_init_event_queue(&ev_file->ev_queue);
	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
	struct devx_async_event_file *ev_file;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 flags;
	int err;

	err = uverbs_get_flags32(&flags, attrs,
		MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
		MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);

	if (err)
		return err;

	ev_file = container_of(uobj, struct devx_async_event_file,
			       uobj);
	spin_lock_init(&ev_file->lock);
	INIT_LIST_HEAD(&ev_file->event_list);
	init_waitqueue_head(&ev_file->poll_wait);
	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
		ev_file->omit_data = 1;
	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
	ev_file->dev = dev;
	get_device(&dev->ib_dev.dev);
	return 0;
}

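/*
 * Completion callback for MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY. It runs from
 * the mlx5 command completion path, hence the irqsave locking. The finished
 * command output is queued on the FD's event list, readers are woken and the
 * file reference taken when the command was posted is dropped.
 */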
static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct ib_uobject *fd_uobj = async_data->fd_uobj;
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_event_queue *ev_queue;
	unsigned long flags;

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);
	ev_queue = &ev_file->ev_queue;

	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	fput(fd_uobj->object);
}

#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */

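/*
 * Issue an object query asynchronously; the output is delivered later through
 * the async command FD. The outstanding output bytes per FD are bounded by
 * MAX_ASYNC_BYTES_IN_USE, and a file reference is held on the FD so that
 * devx_query_callback() can complete safely after this handler returns.
 */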
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
			MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->fd_uobj = fd_uobj;

	get_file(fd_uobj->object);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
		    uverbs_attr_get_len(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
		    async_data->hdr.out_data,
		    async_data->cmd_out_len,
		    devx_query_callback, &async_data->cb_work);

	if (err)
		goto cb_err;

	return 0;

cb_err:
	fput(fd_uobj->object);
free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}

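/*
 * Event subscriptions are kept in a two-level XArray: level 1 is keyed by the
 * event type (combined with the object type for affiliated events), level 2 by
 * the object id. The dealloc helper below only removes a level 2 entry that
 * has no remaining subscribers; level 1 entries are left in place.
 */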
static void
subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
			   u32 key_level1,
			   bool is_level2,
			   u32 key_level2)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	/* Level 1 entry is kept for future use, no need to free it */
	if (!is_level2)
		return;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids,
				key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int
subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
			 u32 key_level1,
			 bool is_level2,
			 u32 key_level2)
{
	struct devx_obj_event *obj_event;
	struct devx_event *event;
	int err;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	if (!event) {
		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (!event)
			return -ENOMEM;

		INIT_LIST_HEAD(&event->unaffiliated_list);
		xa_init(&event->object_ids);

		err = xa_insert(&devx_event_table->event_xa,
				key_level1,
				event,
				GFP_KERNEL);
		if (err) {
			kfree(event);
			return err;
		}
	}

	if (!is_level2)
		return 0;

	obj_event = xa_load(&event->object_ids, key_level2);
	if (!obj_event) {
		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
		if (!obj_event)
			/* Level 1 entry is kept for future use, no need to free it */
			return -ENOMEM;

		err = xa_insert(&event->object_ids,
				key_level2,
				obj_event,
				GFP_KERNEL);
		if (err)
			return err;
		INIT_LIST_HEAD(&obj_event->obj_sub_list);
	}

	return 0;
}

static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
				   struct devx_obj *obj)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (obj) {
			if (!is_legacy_obj_event_num(event_type_num_list[i]))
				return false;
		} else if (!is_legacy_unaffiliated_event_num(
				event_type_num_list[i])) {
			return false;
		}
	}

	return true;
}

#define MAX_SUPP_EVENT_NUM 255
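/*
 * Validate the requested event numbers against the device capabilities: when
 * event_cap is supported the firmware exposes affiliated/unaffiliated event
 * bit masks, otherwise fall back to the legacy fixed lists.
 */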
static bool is_valid_events(struct mlx5_core_dev *dev,
			    int num_events, u16 *event_type_num_list,
			    struct devx_obj *obj)
{
	__be64 *aff_events;
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;
	int i;

	if (MLX5_CAP_GEN(dev, event_cap)) {
		aff_events = MLX5_CAP_DEV_EVENT(dev,
						user_affiliated_events);
		unaff_events = MLX5_CAP_DEV_EVENT(dev,
						  user_unaffiliated_events);
	} else {
		return is_valid_events_legacy(num_events, event_type_num_list,
					      obj);
	}

	for (i = 0; i < num_events; i++) {
		if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
			return false;

		mask_entry = event_type_num_list[i] / 64;
		mask_bit = event_type_num_list[i] % 64;

		if (obj) {
			/* CQ completion */
			if (event_type_num_list[i] == 0)
				continue;

			if (!(be64_to_cpu(aff_events[mask_entry]) &
					(1ull << mask_bit)))
				return false;

			continue;
		}

		if (!(be64_to_cpu(unaff_events[mask_entry]) &
				(1ull << mask_bit)))
			return false;
	}

	return true;
}

#define MAX_NUM_EVENTS 16
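/*
 * Subscribe an event channel FD to a list of event numbers, either
 * unaffiliated or affiliated to a given DEVX object. The handler works in two
 * phases under event_xa_lock: first all the XA entries and subscription
 * objects are allocated (so a failure can be fully unwound), then the
 * subscriptions are published on the relevant RCU lists.
 */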
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	struct ib_uobject *fd_uobj;
	struct devx_obj *obj = NULL;
	struct devx_async_event_file *ev_file;
	struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
	u16 *event_type_num_list;
	struct devx_event_subscription *event_sub, *tmp_sub;
	struct list_head sub_list;
	int redirect_fd;
	bool use_eventfd = false;
	int num_events;
	int num_alloc_xa_entries = 0;
	u16 obj_type = 0;
	u64 cookie = 0;
	u32 obj_id = 0;
	int err;
	int i;

	if (!c->devx_uid)
		return -EINVAL;

	if (!IS_ERR(devx_uobj)) {
		obj = (struct devx_obj *)devx_uobj->object;
		if (obj)
			obj_id = get_dec_obj_id(obj->obj_id);
	}

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_event_file,
			       uobj);

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
		err = uverbs_copy_from(&redirect_fd, attrs,
			       MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
		if (err)
			return err;

		use_eventfd = true;
	}

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
		if (use_eventfd)
			return -EINVAL;

		err = uverbs_copy_from(&cookie, attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
		if (err)
			return err;
	}

	num_events = uverbs_attr_ptr_get_array_size(
		attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		sizeof(u16));

	if (num_events < 0)
		return num_events;

	if (num_events > MAX_NUM_EVENTS)
		return -EINVAL;

	event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);

	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
		return -EINVAL;

	INIT_LIST_HEAD(&sub_list);

	/* Serialize concurrent subscriptions to the same XA entries so that
	 * all of them can succeed
	 */
	mutex_lock(&devx_event_table->event_xa_lock);
	for (i = 0; i < num_events; i++) {
		u32 key_level1;

		if (obj)
			obj_type = get_dec_obj_type(obj,
						    event_type_num_list[i]);
		key_level1 = event_type_num_list[i] | obj_type << 16;

		err = subscribe_event_xa_alloc(devx_event_table,
					       key_level1,
					       obj,
					       obj_id);
		if (err)
			goto err;

		num_alloc_xa_entries++;
		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
		if (!event_sub) {
			err = -ENOMEM;
			goto err;
		}

		list_add_tail(&event_sub->event_list, &sub_list);
		if (use_eventfd) {
			event_sub->eventfd =
				eventfd_ctx_fdget(redirect_fd);

			if (IS_ERR(event_sub->eventfd)) {
				err = PTR_ERR(event_sub->eventfd);
				event_sub->eventfd = NULL;
				goto err;
			}
		}

		event_sub->cookie = cookie;
		event_sub->ev_file = ev_file;
		event_sub->filp = fd_uobj->object;
		/* May be needed upon cleanup of the devx object/subscription */
		event_sub->xa_key_level1 = key_level1;
		event_sub->xa_key_level2 = obj_id;
		INIT_LIST_HEAD(&event_sub->obj_list);
	}

	/* Once all the allocations and the XA data insertions are done, we
	 * can go ahead and add all the subscriptions to the relevant lists
	 * without concern of a failure.
	 */
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		struct devx_event *event;
		struct devx_obj_event *obj_event;

		list_del_init(&event_sub->event_list);

		spin_lock_irq(&ev_file->lock);
		list_add_tail_rcu(&event_sub->file_list,
				  &ev_file->subscribed_events_list);
		spin_unlock_irq(&ev_file->lock);

		event = xa_load(&devx_event_table->event_xa,
				event_sub->xa_key_level1);
		WARN_ON(!event);

		if (!obj) {
			list_add_tail_rcu(&event_sub->xa_list,
					  &event->unaffiliated_list);
			continue;
		}

		obj_event = xa_load(&event->object_ids, obj_id);
		WARN_ON(!obj_event);
		list_add_tail_rcu(&event_sub->xa_list,
				  &obj_event->obj_sub_list);
		list_add_tail_rcu(&event_sub->obj_list,
				  &obj->event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return 0;

err:
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		list_del(&event_sub->event_list);

		subscribe_event_xa_dealloc(devx_event_table,
					   event_sub->xa_key_level1,
					   obj,
					   obj_id);

		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);

		kfree(event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return err;
}

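/*
 * Pin the user memory described by the ADDR/LEN attributes and compute the
 * page layout (page shift, page offset and MTT count) that will be reported
 * to the device in the CREATE_UMEM command.
 */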
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	u32 access;
	int npages;
	int err;
	u32 page_mask;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	err = uverbs_get_flags32(&access, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ);
	if (err)
		return err;

	err = ib_check_mr_access(access);
	if (err)
		return err;

	obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);

	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &obj->page_shift, &obj->ncont, NULL);

	if (!npages) {
		ib_umem_release(obj->umem);
		return -EINVAL;
	}

	page_mask = (1 << obj->page_shift) - 1;
	obj->page_offset = obj->umem->address & page_mask;

	return 0;
}

static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
				   struct devx_umem *obj,
				   struct devx_umem_reg_cmd *cmd)
{
	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
		    (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
	return PTR_ERR_OR_ZERO(cmd->in);
}

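/*
 * Fill the CREATE_UMEM mailbox: number of MTT entries, log page size relative
 * to the adapter page shift, page offset, and the MTT array itself with
 * read/write permissions matching the umem.
 */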
static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
				    struct devx_umem *obj,
				    struct devx_umem_reg_cmd *cmd)
{
	void *umem;
	__be64 *mtt;

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset, obj->page_offset);
	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
			     MLX5_IB_MTT_READ);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	u32 obj_id;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	int err;

	if (!c->devx_uid)
		return -EINVAL;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
	if (err)
		goto err_umem_release;

	devx_umem_reg_cmd_build(dev, obj, &cmd);

	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_release;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
	if (err)
		goto err_umem_destroy;

	return 0;

err_umem_destroy:
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}

static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why,
			     struct uverbs_attr_bundle *attrs)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(err, why, uobject))
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}

static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
				  unsigned long event_type)
{
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;

	if (!MLX5_CAP_GEN(dev, event_cap))
		return is_legacy_unaffiliated_event_num(event_type);

	unaff_events = MLX5_CAP_DEV_EVENT(dev,
					  user_unaffiliated_events);
	WARN_ON(event_type > MAX_SUPP_EVENT_NUM);

	mask_entry = event_type / 64;
	mask_bit = event_type % 64;

	if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
		return false;

	return true;
}

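/* Extract the object number (QPN/SRQN/DCTN/CQN or generic obj_id) from the EQE */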
static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
{
	struct mlx5_eqe *eqe = data;
	u32 obj_id = 0;

	switch (event_type) {
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
		obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_CQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
		break;
	default:
		obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
		break;
	}

	return obj_id;
}

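/*
 * Queue an event on the subscriber's FD. In OMIT_DATA mode only the
 * subscription (i.e. its cookie) is queued, and at most once; otherwise a
 * copy of the EQE is queued with GFP_ATOMIC, and an allocation failure is
 * reported to the reader as an overflow.
 */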
static int deliver_event(struct devx_event_subscription *event_sub,
			 const void *data)
{
	struct devx_async_event_file *ev_file;
	struct devx_async_event_data *event_data;
	unsigned long flags;

	ev_file = event_sub->ev_file;

	if (ev_file->omit_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		if (!list_empty(&event_sub->event_list)) {
			spin_unlock_irqrestore(&ev_file->lock, flags);
			return 0;
		}

		list_add_tail(&event_sub->event_list, &ev_file->event_list);
		spin_unlock_irqrestore(&ev_file->lock, flags);
		wake_up_interruptible(&ev_file->poll_wait);
		return 0;
	}

	event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
			     GFP_ATOMIC);
	if (!event_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		ev_file->is_overflow_err = 1;
		spin_unlock_irqrestore(&ev_file->lock, flags);
		return -ENOMEM;
	}

	event_data->hdr.cookie = event_sub->cookie;
	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));

	spin_lock_irqsave(&ev_file->lock, flags);
	list_add_tail(&event_data->list, &ev_file->event_list);
	spin_unlock_irqrestore(&ev_file->lock, flags);
	wake_up_interruptible(&ev_file->poll_wait);

	return 0;
}

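/*
 * Walk an RCU-protected subscription list and deliver the event to each
 * subscriber, either by signalling its eventfd or by queueing the data on its
 * event channel FD. A file reference is taken around each delivery.
 */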
static void dispatch_event_fd(struct list_head *fd_list,
			      const void *data)
{
	struct devx_event_subscription *item;

	list_for_each_entry_rcu(item, fd_list, xa_list) {
		if (!get_file_rcu(item->filp))
			continue;

		if (item->eventfd) {
			eventfd_signal(item->eventfd, 1);
			fput(item->filp);
			continue;
		}

		deliver_event(item, data);
		fput(item->filp);
	}
}

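/*
 * mlx5 EQ notifier: filters out frequent kernel-only events, builds the
 * level 1 key from the event type (and the object type for affiliated events)
 * and dispatches to the unaffiliated list or to the per-object subscriber
 * list under rcu_read_lock().
 */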
static int devx_event_notifier(struct notifier_block *nb,
			       unsigned long event_type, void *data)
{
	struct mlx5_devx_event_table *table;
	struct mlx5_ib_dev *dev;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u16 obj_type = 0;
	bool is_unaffiliated;
	u32 obj_id;

	/* Explicit filtering to kernel events which may occur frequently */
	if (event_type == MLX5_EVENT_TYPE_CMD ||
	    event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
		return NOTIFY_OK;

	table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
	dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
	is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);

	if (!is_unaffiliated)
		obj_type = get_event_obj_type(event_type, data);

	rcu_read_lock();
	event = xa_load(&table->event_xa, event_type | (obj_type << 16));
	if (!event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	if (is_unaffiliated) {
		dispatch_event_fd(&event->unaffiliated_list, data);
		rcu_read_unlock();
		return NOTIFY_OK;
	}

	obj_id = devx_get_obj_id_from_event(event_type, data);
	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	dispatch_event_fd(&obj_event->obj_sub_list, data);

	rcu_read_unlock();
	return NOTIFY_OK;
}

void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;

	xa_init(&table->event_xa);
	mutex_init(&table->event_xa_lock);
	MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
	mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
}

void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	struct devx_event_subscription *sub, *tmp;
	struct devx_event *event;
	void *entry;
	unsigned long id;

	mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
	mutex_lock(&dev->devx_event_table.event_xa_lock);
	xa_for_each(&table->event_xa, id, entry) {
		event = entry;
		list_for_each_entry_safe(sub, tmp, &event->unaffiliated_list,
					 xa_list)
			devx_cleanup_subscription(dev, sub);
		kfree(entry);
	}
	mutex_unlock(&dev->devx_event_table.event_xa_lock);
	xa_destroy(&table->event_xa);
}

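/*
 * read() on the async command FD: blocks until a completed command is
 * available (unless O_NONBLOCK), returns -EIO once the FD is destroyed and
 * drained, and -ENOSPC if the user buffer cannot hold the whole event.
 */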
static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *event;
	int ret = 0;
	size_t eventsz;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(
			    ev_queue->poll_wait,
			    (!list_empty(&ev_queue->event_list) ||
			     ev_queue->is_destroyed))) {
			return -ERESTARTSYS;
		}

		if (list_empty(&ev_queue->event_list) &&
		    ev_queue->is_destroyed)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next,
			   struct devx_async_data, list);
	eventsz = event->cmd_out_len +
			sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);

	if (eventsz > count) {
		spin_unlock_irq(&ev_queue->lock);
		return -ENOSPC;
	}

	list_del(ev_queue->event_list.next);
	spin_unlock_irq(&ev_queue->lock);

	if (copy_to_user(buf, &event->hdr, eventsz))
		ret = -EFAULT;
	else
		ret = eventsz;

	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
	kvfree(event);
	return ret;
}

static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uobject *uobj = filp->private_data;
	struct devx_async_cmd_event_file *comp_ev_file = container_of(
		uobj, struct devx_async_cmd_event_file, uobj);
	struct devx_async_data *entry, *tmp;

	spin_lock_irq(&comp_ev_file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp,
				 &comp_ev_file->ev_queue.event_list, list)
		kvfree(entry);
	spin_unlock_irq(&comp_ev_file->ev_queue.lock);

	uverbs_close_fd(filp);
	return 0;
}

static __poll_t devx_async_cmd_event_poll(struct file *filp,
					      struct poll_table_struct *wait)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (ev_queue->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}

static const struct file_operations devx_async_cmd_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_cmd_event_read,
	.poll    = devx_async_cmd_event_poll,
	.release = devx_async_cmd_event_close,
	.llseek	 = no_llseek,
};

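/*
 * read() on the event channel FD: in OMIT_DATA mode only the 8-byte cookie is
 * returned, otherwise the header followed by the raw EQE. A previously
 * dropped event is reported once as -EOVERFLOW.
 */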
static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
				     size_t count, loff_t *pos)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub;
	struct devx_async_event_data *uninitialized_var(event);
	int ret = 0;
	size_t eventsz;
	bool omit_data;
	void *event_data;

	omit_data = ev_file->omit_data;

	spin_lock_irq(&ev_file->lock);

	if (ev_file->is_overflow_err) {
		ev_file->is_overflow_err = 0;
		spin_unlock_irq(&ev_file->lock);
		return -EOVERFLOW;
	}

	if (ev_file->is_destroyed) {
		spin_unlock_irq(&ev_file->lock);
		return -EIO;
	}

	while (list_empty(&ev_file->event_list)) {
		spin_unlock_irq(&ev_file->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_file->poll_wait,
			    (!list_empty(&ev_file->event_list) ||
			     ev_file->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_file->lock);
		if (ev_file->is_destroyed) {
			spin_unlock_irq(&ev_file->lock);
			return -EIO;
		}
	}

	if (omit_data) {
		event_sub = list_first_entry(&ev_file->event_list,
					struct devx_event_subscription,
					event_list);
		eventsz = sizeof(event_sub->cookie);
		event_data = &event_sub->cookie;
	} else {
		event = list_first_entry(&ev_file->event_list,
				      struct devx_async_event_data, list);
		eventsz = sizeof(struct mlx5_eqe) +
			sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
		event_data = &event->hdr;
	}

	if (eventsz > count) {
		spin_unlock_irq(&ev_file->lock);
		return -EINVAL;
	}

	if (omit_data)
		list_del_init(&event_sub->event_list);
	else
		list_del(&event->list);

	spin_unlock_irq(&ev_file->lock);

	if (copy_to_user(buf, event_data, eventsz))
		/* This points to an application issue, not a kernel concern */
		ret = -EFAULT;
	else
		ret = eventsz;

	if (!omit_data)
		kfree(event);
	return ret;
}

static __poll_t devx_async_event_poll(struct file *filp,
				      struct poll_table_struct *wait)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_file->poll_wait, wait);

	spin_lock_irq(&ev_file->lock);
	if (ev_file->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_file->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_file->lock);

	return pollflags;
}

static int devx_async_event_close(struct inode *inode, struct file *filp)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub, *event_sub_tmp;
	struct devx_async_event_data *entry, *tmp;

	mutex_lock(&ev_file->dev->devx_event_table.event_xa_lock);
	/* delete the subscriptions which are related to this FD */
	list_for_each_entry_safe(event_sub, event_sub_tmp,
				 &ev_file->subscribed_events_list, file_list) {
		devx_cleanup_subscription(ev_file->dev, event_sub);
		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);

		list_del_rcu(&event_sub->file_list);
		/* subscription may not be used by the read API any more */
		kfree_rcu(event_sub, rcu);
	}

	mutex_unlock(&ev_file->dev->devx_event_table.event_xa_lock);

	/* free the pending events allocation */
	if (!ev_file->omit_data) {
		spin_lock_irq(&ev_file->lock);
		list_for_each_entry_safe(entry, tmp,
					 &ev_file->event_list, list)
			kfree(entry); /* no further reads can occur */
		spin_unlock_irq(&ev_file->lock);
	}

	uverbs_close_fd(filp);
	put_device(&ev_file->dev->ib_dev.dev);
	return 0;
}

static const struct file_operations devx_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_event_read,
	.poll    = devx_async_event_poll,
	.release = devx_async_event_close,
	.llseek	 = no_llseek,
};

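/*
 * Hot unplug of the async command FD: mark the queue as destroyed so readers
 * and pollers bail out, and flush any outstanding command callbacks via
 * mlx5_cmd_cleanup_async_ctx().
 */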
static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
						   enum rdma_remove_reason why)
{
	struct devx_async_cmd_event_file *comp_ev_file =
		container_of(uobj, struct devx_async_cmd_event_file,
			     uobj);
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;

	spin_lock_irq(&ev_queue->lock);
	ev_queue->is_destroyed = 1;
	spin_unlock_irq(&ev_queue->lock);

	if (why == RDMA_REMOVE_DRIVER_REMOVE)
		wake_up_interruptible(&ev_queue->poll_wait);

	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
	return 0;
};

static int devx_hot_unplug_async_event_file(struct ib_uobject *uobj,
					    enum rdma_remove_reason why)
{
	struct devx_async_event_file *ev_file =
		container_of(uobj, struct devx_async_event_file,
			     uobj);

	spin_lock_irq(&ev_file->lock);
	ev_file->is_destroyed = 1;
	spin_unlock_irq(&ev_file->lock);

	wake_up_interruptible(&ev_file->poll_wait);
	return 0;
};

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			     enum ib_access_flags),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_WRITE,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
		u16, UA_MANDATORY),
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UVERBS_ACCESS_READ,
		UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
		UVERBS_ATTR_TYPE(u64),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UVERBS_ACCESS_READ,
		UA_MANDATORY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
		MLX5_IB_OBJECT_DEVX_OBJ,
		UVERBS_ACCESS_READ,
		UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
		UVERBS_ATTR_TYPE(u64),
		UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
		UVERBS_ATTR_TYPE(u32),
		UA_OPTIONAL));

DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));


DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
			MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
			     devx_hot_unplug_async_cmd_event_file,
			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
			MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
			enum mlx5_ib_uapi_devx_create_event_channel_flags,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
			     devx_hot_unplug_async_event_file,
			     &devx_async_event_fops, "[devx_async_event]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));

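/* DEVX is exposed only when the firmware reports a non-zero log_max_uctx */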
static bool devx_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
}

const struct uapi_definition mlx5_ib_devx_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_OBJ,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_UMEM,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	{},
};