// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include <linux/xarray.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

static void dispatch_event_fd(struct list_head *fd_list, const void *data);

enum devx_obj_flags {
	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
	DEVX_OBJ_FLAGS_DCT = 1 << 1,
	DEVX_OBJ_FLAGS_CQ = 1 << 2,
};

struct devx_async_data {
	struct mlx5_ib_dev *mdev;
	struct list_head list;
	struct ib_uobject *fd_uobj;
	struct mlx5_async_work cb_work;
	u16 cmd_out_len;
	/* must be last field in this structure */
	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};

struct devx_async_event_data {
	struct list_head list; /* headed in ev_file->event_list */
	struct mlx5_ib_uapi_devx_async_event_hdr hdr;
};

/* first level XA value data structure */
struct devx_event {
	struct xarray object_ids; /* second XA level, Key = object id */
	struct list_head unaffiliated_list;
};

/* second level XA value data structure */
struct devx_obj_event {
	struct rcu_head rcu;
	struct list_head obj_sub_list;
};

struct devx_event_subscription {
	struct list_head file_list; /* headed in ev_file->
				     * subscribed_events_list
				     */
	struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
				   * devx_obj_event->obj_sub_list
				   */
	struct list_head obj_list; /* headed in devx_object */
	struct list_head event_list; /* headed in ev_file->event_list or in
				      * temp list via subscription
				      */

	u8 is_cleaned:1;
	u32 xa_key_level1;
	u32 xa_key_level2;
	struct rcu_head	rcu;
	u64 cookie;
	struct devx_async_event_file *ev_file;
	struct file *filp; /* Upon hot unplug we need direct access to it */
	struct eventfd_ctx *eventfd;
};

struct devx_async_event_file {
	struct ib_uobject uobj;
	/* Head of events that are subscribed to this FD */
	struct list_head subscribed_events_list;
	spinlock_t lock;
	wait_queue_head_t poll_wait;
	struct list_head event_list;
	struct mlx5_ib_dev *dev;
	u8 omit_data:1;
	u8 is_overflow_err:1;
	u8 is_destroyed:1;
};

#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj {
	struct mlx5_ib_dev	*ib_dev;
	u64			obj_id;
	u32			dinlen; /* destroy inbox length */
	u32			dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
	u32			flags;
	union {
		struct mlx5_ib_devx_mr	devx_mr;
		struct mlx5_core_dct	core_dct;
		struct mlx5_core_cq	core_cq;
	};
	struct list_head event_sub; /* holds devx_event_subscription entries */
};

struct devx_umem {
	struct mlx5_core_dev		*mdev;
	struct ib_umem			*umem;
	u32				page_offset;
	int				page_shift;
	int				ncont;
	u32				dinlen;
	u32				dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};

struct devx_umem_reg_cmd {
	void				*in;
	u32				inlen;
	u32				out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};

static struct mlx5_ib_ucontext *
devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
{
	return to_mucontext(ib_uverbs_get_ucontext(attrs));
}

int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *uctx;
	int err;
	u16 uid;
	u32 cap = 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
		return -EINVAL;

	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
	if (is_user && capable(CAP_NET_RAW) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
		cap |= MLX5_UCTX_CAP_RAW_TX;
	if (is_user && capable(CAP_SYS_RAWIO) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
	     MLX5_UCTX_CAP_INTERNAL_DEV_RES))
		cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(uctx, uctx, cap, cap);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return uid;
}

void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_DESTROY_TIR:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		*dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
				    obj_id);
		return true;

	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		*dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
				    table_id);
		return true;
	default:
		return false;
	}
}

bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
		*counter_id = MLX5_GET(dealloc_flow_counter_in,
				       devx_obj->dinbox,
				       flow_counter_id);
		return true;
	}

	return false;
}

static bool is_legacy_unaffiliated_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return true;
	default:
		return false;
	}
}

static bool is_legacy_obj_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_COMP:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return true;
	default:
		return false;
	}
}

static u16 get_legacy_obj_type(u16 opcode)
{
	switch (opcode) {
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_EVENT_QUEUE_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_EVENT_QUEUE_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_EVENT_QUEUE_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return 0;
	}
}

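/*
 * Derive the object type from the encoded obj_id: the creator opcode sits in
 * bits 47..32, and for general objects the obj_type sits in bits 63..48.
 */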
static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
{
	u16 opcode;

	opcode = (obj->obj_id >> 32) & 0xffff;

	if (is_legacy_obj_event_num(event_num))
		return get_legacy_obj_type(opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		return (obj->obj_id >> 48);
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_OBJ_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_OBJ_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_OBJ_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_OBJ_TYPE_DCT;
	case MLX5_CMD_OP_CREATE_TIR:
		return MLX5_OBJ_TYPE_TIR;
	case MLX5_CMD_OP_CREATE_TIS:
		return MLX5_OBJ_TYPE_TIS;
	case MLX5_CMD_OP_CREATE_PSV:
		return MLX5_OBJ_TYPE_PSV;
	case MLX5_OBJ_TYPE_MKEY:
		return MLX5_OBJ_TYPE_MKEY;
	case MLX5_CMD_OP_CREATE_RMP:
		return MLX5_OBJ_TYPE_RMP;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return MLX5_OBJ_TYPE_XRC_SRQ;
	case MLX5_CMD_OP_CREATE_XRQ:
		return MLX5_OBJ_TYPE_XRQ;
	case MLX5_CMD_OP_CREATE_RQT:
		return MLX5_OBJ_TYPE_RQT;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		return MLX5_OBJ_TYPE_FLOW_COUNTER;
	case MLX5_CMD_OP_CREATE_CQ:
		return MLX5_OBJ_TYPE_CQ;
	default:
		return 0;
	}
}

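/* Resolve the queue/object type an EQE refers to, per its event type. */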
static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
{
	switch (event_type) {
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return eqe->data.qp_srq.type;
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return 0;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
	}
}

static u32 get_dec_obj_id(u64 obj_id)
{
	return (obj_id & 0xffffffff);
}

/*
 * As the obj_id in the firmware is not globally unique the object type
 * must be considered upon checking for a valid object id.
 * For that the opcode of the creator command is encoded as part of the obj_id.
 */
static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
{
	return ((u64)opcode << 32) | obj_id;
}

static u64 devx_get_obj_id(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u64 obj_id;

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_type) << 16,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
					MLX5_GET(query_mkey_in, in,
						 mkey_index));
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(query_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(modify_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(query_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(modify_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(query_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(modify_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(query_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(modify_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(query_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(modify_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(query_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(modify_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(query_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(modify_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(query_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(modify_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
					MLX5_GET(query_flow_group_in, in,
						 group_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(query_fte_in, in,
						 flow_index));
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(set_fte_in, in, flow_index));
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
					MLX5_GET(query_q_counter_in, in,
						 counter_set_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
					MLX5_GET(query_flow_counter_in, in,
						 flow_counter_id));
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(query_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(modify_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
					MLX5_GET(add_vxlan_udp_dport_in, in,
						 vxlan_udp_port));
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(query_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(set_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(query_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rst2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2rtr_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rtr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rts2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(sqerr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2err_in, in, qpn));
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2rst_in, in, qpn));
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(query_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(query_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(query_xrc_srq_in, in,
						 xrc_srqn));
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
					MLX5_GET(query_srq_in, in, srqn));
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(arm_rq_in, in, srq_number));
		break;
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(drain_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(arm_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		obj_id = get_enc_obj_id
				(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
				 MLX5_GET(query_packet_reformat_context_in,
					  in, packet_reformat_id));
		break;
	default:
		obj_id = 0;
	}

	return obj_id;
}

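/*
 * Check that the obj_id referenced by the user command matches the uobject
 * that the method was invoked on.
 */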
static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
				 struct ib_uobject *uobj, const void *in)
{
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	u64 obj_id = devx_get_obj_id(in);

	if (!obj_id)
		return false;

	switch (uobj_get_object_id(uobj)) {
	case UVERBS_OBJECT_CQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
				      to_mcq(uobj->object)->mcq.cqn) ==
				      obj_id;

	case UVERBS_OBJECT_SRQ:
	{
		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
		u16 opcode;

		switch (srq->common.res) {
		case MLX5_RES_XSRQ:
			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
			break;
		case MLX5_RES_XRQ:
			opcode = MLX5_CMD_OP_CREATE_XRQ;
			break;
		default:
			if (!dev->mdev->issi)
				opcode = MLX5_CMD_OP_CREATE_SRQ;
			else
				opcode = MLX5_CMD_OP_CREATE_RMP;
		}

		return get_enc_obj_id(opcode,
				      to_msrq(uobj->object)->msrq.srqn) ==
				      obj_id;
	}

	case UVERBS_OBJECT_QP:
	{
		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
		enum ib_qp_type	qp_type = qp->ibqp.qp_type;

		if (qp_type == IB_QPT_RAW_PACKET ||
		    (qp->flags & MLX5_IB_QP_UNDERLAY)) {
			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
							 &qp->raw_packet_qp;
			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;

			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					       rq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					       sq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					       rq->tirn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					       sq->tisn) == obj_id);
		}

		if (qp_type == MLX5_IB_QPT_DCT)
			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					      qp->dct.mdct.mqp.qpn) == obj_id;

		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
				      qp->ibqp.qp_num) == obj_id;
	}

	case UVERBS_OBJECT_WQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
				      to_mrwq(uobj->object)->core_qp.qpn) ==
				      obj_id;

	case UVERBS_OBJECT_RWQ_IND_TBL:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
				      to_mrwq_ind_table(uobj->object)->rqtn) ==
				      obj_id;

	case MLX5_IB_OBJECT_DEVX_OBJ:
		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;

	default:
		return false;
	}
}

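/*
 * Set the umem valid bits so firmware treats the buffers referenced by the
 * command as user memory (umem) based.
 */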
static void devx_set_umem_valid(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
	{
		void *cqc;

		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_QP:
	{
		void *qpc;

		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_RQ:
	{
		void *rqc, *wq;

		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
		wq  = MLX5_ADDR_OF(rqc, rqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_SQ:
	{
		void *sqc, *wq;

		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
		wq = MLX5_ADDR_OF(sqc, sqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_MODIFY_CQ:
		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
		break;

	case MLX5_CMD_OP_CREATE_RMP:
	{
		void *rmpc, *wq;

		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRQ:
	{
		void *xrqc, *wq;

		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	{
		void *xrc_srqc;

		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
					xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
		break;
	}

	default:
		return;
	}
}

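/*
 * Commands that create a firmware object; such an object is anchored to a
 * DEVX uobject and destroyed upon its cleanup.
 */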
static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{
	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (*opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
		if (op_mod == 0)
			return true;
		return false;
	}
	case MLX5_CMD_OP_CREATE_PSV:
	{
		u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);

		if (num_psv == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		return true;
	default:
		return false;
	}
}

static bool devx_is_whitelist_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
		return true;
	default:
		return false;
	}
}

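/*
 * Resolve the uid to stamp into the command: the context's DEVX uid, with a
 * fallback to the device whitelist uid for whitelisted commands only.
 */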
static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{
	if (devx_is_whitelist_cmd(cmd_in)) {
		struct mlx5_ib_dev *dev;

		if (c->devx_uid)
			return c->devx_uid;

		dev = to_mdev(c->ibucontext.device);
		if (dev->devx_whitelist_uid)
			return dev->devx_whitelist_uid;

		return -EOPNOTSUPP;
	}

	if (!c->devx_uid)
		return -EINVAL;

	return c->devx_uid;
}

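/* General commands allowed through MLX5_IB_METHOD_DEVX_OTHER. */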
static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	/* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
	     MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
	    (opcode >= MLX5_CMD_OP_GENERAL_START &&
	     opcode < MLX5_CMD_OP_GENERAL_END))
		return true;

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_QUERY_LAG:
		return true;
	default:
		return false;
	}
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	unsigned int irqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}

/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rang, and
 * validates that the UAR through which the DB was rang matches the UAR ID
 * of the object.
 * If no match the doorbell is silently ignored by the hardware. Of course,
 * the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, and then in this case other users
 * may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it to the hardware schedule
 * queue or arm its CQ for event generation), no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err;
	int uid;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	/* Only a whitelist of general HCA commands is allowed for this method. */
	if (!devx_is_general_cmd(cmd_in, dev))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
			      cmd_out_len);
}

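/*
 * Build the destroy command (din/dinlen) that matches a successful create
 * command, so the object can be torn down on uobject cleanup.
 */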
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;

	case MLX5_CMD_OP_CREATE_UMEM:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_UMEM);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in,  in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in,  in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in,  in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in,  in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	case MLX5_CMD_OP_CREATE_PSV:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_PSV);
		MLX5_SET(destroy_psv_in, din, psvn,
			 MLX5_GET(create_psv_out, out, psv0_index));
		break;
	default:
		/* The entry must match one of the devx_is_obj_create_cmd() cases */
		WARN_ON(true);
		break;
	}
}

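/*
 * Fill in the mkey metadata and publish it in the mkey table so that ODP
 * page faults can resolve this indirect (KLM/KSM) mkey.
 */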
static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
	struct mlx5_core_mkey *mkey;
	void *mkc;
	u8 key;

	mkey = &devx_mr->mmkey;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);

	return xa_err(xa_store(&dev->mdev->priv.mkey_table,
			       mlx5_base_mkey(mkey->key), mkey, GFP_KERNEL));
}

static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
				   struct devx_obj *obj,
				   void *in, int in_len)
{
	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
			MLX5_FLD_SZ_BYTES(create_mkey_in,
			memory_key_mkey_entry);
	void *mkc;
	u8 access_mode;

	if (in_len < min_len)
		return -EINVAL;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;

	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
		access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
		return 0;
	}

	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
	return 0;
}

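/*
 * Unlink a subscription from the event tables; if it was the last subscriber
 * on its object, erase and free the second level XA entry as well.
 */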
static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
				      struct devx_event_subscription *sub)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	if (sub->is_cleaned)
		return;

	sub->is_cleaned = 1;
	list_del_rcu(&sub->xa_list);

	if (list_empty(&sub->obj_list))
		return;

	list_del_rcu(&sub->obj_list);
	/* check whether key level 1 for this obj_sub_list is empty */
	event = xa_load(&dev->devx_event_table.event_xa,
			sub->xa_key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 sub->xa_key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct mlx5_devx_event_table *devx_event_table;
	struct devx_obj *obj = uobject->object;
	struct devx_event_subscription *sub_entry, *tmp;
	struct mlx5_ib_dev *dev;
	int ret;

	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		/*
		 * pagefault_single_data_segment() issues commands against the
		 * mmkey; we must wait for that to stop before freeing the
		 * mkey, as another allocation could get the same mkey #.
		 */
		xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
			 mlx5_base_mkey(obj->devx_mr.mmkey.key));
		synchronize_srcu(&dev->mr_srcu);
	}

	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
				    obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	devx_event_table = &dev->devx_event_table;

	mutex_lock(&devx_event_table->event_xa_lock);
	list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
		devx_cleanup_subscription(dev, sub_entry);
	mutex_unlock(&devx_event_table->event_xa_lock);

	kfree(obj);
	return ret;
}

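/*
 * Completion handler for DEVX created CQs: dispatch the CQE to the
 * subscribers registered on (MLX5_EVENT_TYPE_COMP, cqn).
 */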
static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
	struct mlx5_devx_event_table *table;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u32 obj_id = mcq->cqn;

	table = &obj->ib_dev->devx_event_table;
	rcu_read_lock();
	event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
	if (!event)
		goto out;

	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event)
		goto out;

	dispatch_event_fd(&obj_event->obj_sub_list, eqe);
out:
	rcu_read_unlock();
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len =  uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	int cmd_in_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	u16 obj_type = 0;
	int err;
	int uid;
	u32 obj_id;
	u16 opcode;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
		if (err)
			goto obj_free;
	} else {
		devx_set_umem_valid(cmd_in);
	}

	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
		obj->flags |= DEVX_OBJ_FLAGS_DCT;
		err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
					   cmd_in, cmd_in_len,
					   cmd_out, cmd_out_len);
	} else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
		obj->flags |= DEVX_OBJ_FLAGS_CQ;
		obj->core_cq.comp = devx_cq_comp;
		err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
					  cmd_in, cmd_in_len, cmd_out,
					  cmd_out_len);
	} else {
		err = mlx5_cmd_exec(dev->mdev, cmd_in,
				    cmd_in_len,
				    cmd_out, cmd_out_len);
	}

	if (err)
		goto obj_free;

	uobj->object = obj;
	INIT_LIST_HEAD(&obj->event_sub);
	obj->ib_dev = dev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto obj_destroy;

	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
		if (err)
			goto obj_destroy;
	}
	return 0;

obj_destroy:
	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
			      sizeof(out));
obj_free:
	kfree(obj);
	return err;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	void *cmd_out;
	int err;
	int uid;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	devx_set_umem_valid(cmd_in);

	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	void *cmd_out;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

struct devx_async_event_queue {
	spinlock_t		lock;
	wait_queue_head_t	poll_wait;
	struct list_head	event_list;
	atomic_t		bytes_in_use;
	u8			is_destroyed:1;
};

struct devx_async_cmd_event_file {
	struct ib_uobject		uobj;
	struct devx_async_event_queue	ev_queue;
	struct mlx5_async_ctx		async_ctx;
};

static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	atomic_set(&ev_queue->bytes_in_use, 0);
	ev_queue->is_destroyed = 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_async_cmd_event_file *ev_file;

	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);

	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
			       uobj);
	devx_init_event_queue(&ev_file->ev_queue);
	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
	struct devx_async_event_file *ev_file;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 flags;
	int err;

	err = uverbs_get_flags32(&flags, attrs,
		MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
		MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);

	if (err)
		return err;

	ev_file = container_of(uobj, struct devx_async_event_file,
			       uobj);
	spin_lock_init(&ev_file->lock);
	INIT_LIST_HEAD(&ev_file->event_list);
	init_waitqueue_head(&ev_file->poll_wait);
	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
		ev_file->omit_data = 1;
	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
	ev_file->dev = dev;
	get_device(&dev->ib_dev.dev);
	return 0;
}

static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct ib_uobject *fd_uobj = async_data->fd_uobj;
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_event_queue *ev_queue;
	unsigned long flags;

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);
	ev_queue = &ev_file->ev_queue;

	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
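	/* Drop the file reference taken when the async command was issued */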
	fput(fd_uobj->object);
}

#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
			MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->fd_uobj = fd_uobj;

	get_file(fd_uobj->object);
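	/* The reference is released by devx_query_callback() once the
	 * completion has been queued, or in the error path below.
	 */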
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
		    uverbs_attr_get_len(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
		    async_data->hdr.out_data,
		    async_data->cmd_out_len,
		    devx_query_callback, &async_data->cb_work);

	if (err)
		goto cb_err;

	return 0;

cb_err:
	fput(fd_uobj->object);
free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}

static void
subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
			   u32 key_level1,
			   bool is_level2,
			   u32 key_level2)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	/* Level 1 is valid for future use, no need to free */
	if (!is_level2)
		return;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids,
				key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int
subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
			 u32 key_level1,
			 bool is_level2,
			 u32 key_level2)
{
	struct devx_obj_event *obj_event;
	struct devx_event *event;
	int err;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	if (!event) {
		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (!event)
			return -ENOMEM;

		INIT_LIST_HEAD(&event->unaffiliated_list);
		xa_init(&event->object_ids);

		err = xa_insert(&devx_event_table->event_xa,
				key_level1,
				event,
				GFP_KERNEL);
		if (err) {
			kfree(event);
			return err;
		}
	}

	if (!is_level2)
		return 0;

	obj_event = xa_load(&event->object_ids, key_level2);
	if (!obj_event) {
		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
		if (!obj_event)
			/* Level1 is valid for future use, no need to free */
			return -ENOMEM;

		INIT_LIST_HEAD(&obj_event->obj_sub_list);
		err = xa_insert(&event->object_ids,
				key_level2,
				obj_event,
				GFP_KERNEL);
		if (err) {
			kfree(obj_event);
			return err;
		}
	}

	return 0;
}

static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
				   struct devx_obj *obj)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (obj) {
			if (!is_legacy_obj_event_num(event_type_num_list[i]))
				return false;
		} else if (!is_legacy_unaffiliated_event_num(
				event_type_num_list[i])) {
			return false;
		}
	}

	return true;
}

#define MAX_SUPP_EVENT_NUM 255
static bool is_valid_events(struct mlx5_core_dev *dev,
			    int num_events, u16 *event_type_num_list,
			    struct devx_obj *obj)
{
	__be64 *aff_events;
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;
	int i;

	if (MLX5_CAP_GEN(dev, event_cap)) {
		aff_events = MLX5_CAP_DEV_EVENT(dev,
						user_affiliated_events);
		unaff_events = MLX5_CAP_DEV_EVENT(dev,
						  user_unaffiliated_events);
	} else {
		return is_valid_events_legacy(num_events, event_type_num_list,
					      obj);
	}

	for (i = 0; i < num_events; i++) {
		if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
			return false;

		mask_entry = event_type_num_list[i] / 64;
		mask_bit = event_type_num_list[i] % 64;

		if (obj) {
			/* CQ completion */
			if (event_type_num_list[i] == 0)
				continue;

			if (!(be64_to_cpu(aff_events[mask_entry]) &
					(1ull << mask_bit)))
				return false;

			continue;
		}

		if (!(be64_to_cpu(unaff_events[mask_entry]) &
				(1ull << mask_bit)))
			return false;
	}

	return true;
}

#define MAX_NUM_EVENTS 16
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	struct ib_uobject *fd_uobj;
	struct devx_obj *obj = NULL;
	struct devx_async_event_file *ev_file;
	struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
	u16 *event_type_num_list;
	struct devx_event_subscription *event_sub, *tmp_sub;
	struct list_head sub_list;
	int redirect_fd;
	bool use_eventfd = false;
	int num_events;
	int num_alloc_xa_entries = 0;
	u16 obj_type = 0;
	u64 cookie = 0;
	u32 obj_id = 0;
	int err;
	int i;

	if (!c->devx_uid)
		return -EINVAL;

	if (!IS_ERR(devx_uobj)) {
		obj = (struct devx_obj *)devx_uobj->object;
		if (obj)
			obj_id = get_dec_obj_id(obj->obj_id);
	}

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_event_file,
			       uobj);

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
		err = uverbs_copy_from(&redirect_fd, attrs,
			       MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
		if (err)
			return err;

		use_eventfd = true;
	}

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
		if (use_eventfd)
			return -EINVAL;

		err = uverbs_copy_from(&cookie, attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
		if (err)
			return err;
	}

	num_events = uverbs_attr_ptr_get_array_size(
		attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		sizeof(u16));

	if (num_events < 0)
		return num_events;

	if (num_events > MAX_NUM_EVENTS)
		return -EINVAL;

	event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);

	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
		return -EINVAL;

	INIT_LIST_HEAD(&sub_list);

	/* Protect from concurrent subscriptions to same XA entries to allow
	 * both to succeed
	 */
	mutex_lock(&devx_event_table->event_xa_lock);
	for (i = 0; i < num_events; i++) {
		u32 key_level1;

		if (obj)
			obj_type = get_dec_obj_type(obj,
						    event_type_num_list[i]);
		key_level1 = event_type_num_list[i] | obj_type << 16;

		err = subscribe_event_xa_alloc(devx_event_table,
					       key_level1,
					       obj,
					       obj_id);
		if (err)
			goto err;

		num_alloc_xa_entries++;
		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
		if (!event_sub) {
			err = -ENOMEM;
			goto err;
		}

		list_add_tail(&event_sub->event_list, &sub_list);
		if (use_eventfd) {
			event_sub->eventfd =
				eventfd_ctx_fdget(redirect_fd);

			if (IS_ERR(event_sub->eventfd)) {
				err = PTR_ERR(event_sub->eventfd);
				event_sub->eventfd = NULL;
				goto err;
			}
		}

		event_sub->cookie = cookie;
		event_sub->ev_file = ev_file;
		event_sub->filp = fd_uobj->object;
		/* May be needed upon cleanup the devx object/subscription */
		event_sub->xa_key_level1 = key_level1;
		event_sub->xa_key_level2 = obj_id;
		INIT_LIST_HEAD(&event_sub->obj_list);
	}

	/* Once all the allocations and the XA data insertions were done we
	 * can go ahead and add all the subscriptions to the relevant lists
	 * without concern of a failure.
	 */
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		struct devx_event *event;
		struct devx_obj_event *obj_event;

		list_del_init(&event_sub->event_list);

		spin_lock_irq(&ev_file->lock);
		list_add_tail_rcu(&event_sub->file_list,
				  &ev_file->subscribed_events_list);
		spin_unlock_irq(&ev_file->lock);

		event = xa_load(&devx_event_table->event_xa,
				event_sub->xa_key_level1);
		WARN_ON(!event);

		if (!obj) {
			list_add_tail_rcu(&event_sub->xa_list,
					  &event->unaffiliated_list);
			continue;
		}

		obj_event = xa_load(&event->object_ids, obj_id);
		WARN_ON(!obj_event);
		list_add_tail_rcu(&event_sub->xa_list,
				  &obj_event->obj_sub_list);
		list_add_tail_rcu(&event_sub->obj_list,
				  &obj->event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return 0;

err:
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		list_del(&event_sub->event_list);

		subscribe_event_xa_dealloc(devx_event_table,
					   event_sub->xa_key_level1,
					   obj,
					   obj_id);

		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);

		kfree(event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return err;
}

static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	u32 access;
	int npages;
	int err;
	u32 page_mask;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	err = uverbs_get_flags32(&access, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ);
	if (err)
		return err;

	err = ib_check_mr_access(access);
	if (err)
		return err;

	obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);

	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &obj->page_shift, &obj->ncont, NULL);

	if (!npages) {
		ib_umem_release(obj->umem);
		return -EINVAL;
	}

	page_mask = (1 << obj->page_shift) - 1;
	obj->page_offset = obj->umem->address & page_mask;

	return 0;
}

static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
				   struct devx_umem *obj,
				   struct devx_umem_reg_cmd *cmd)
{
	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
		    (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
	return PTR_ERR_OR_ZERO(cmd->in);
}

static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
				    struct devx_umem *obj,
				    struct devx_umem_reg_cmd *cmd)
{
	void *umem;
	__be64 *mtt;

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset, obj->page_offset);
	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
			     MLX5_IB_MTT_READ);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	u32 obj_id;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	int err;

	if (!c->devx_uid)
		return -EINVAL;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
	if (err)
		goto err_umem_release;

	devx_umem_reg_cmd_build(dev, obj, &cmd);

	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_release;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
	if (err)
		goto err_umem_destroy;

	return 0;

err_umem_destroy:
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}

static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why,
			     struct uverbs_attr_bundle *attrs)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(err, why, uobject))
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}

static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
				  unsigned long event_type)
{
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;

	if (!MLX5_CAP_GEN(dev, event_cap))
		return is_legacy_unaffiliated_event_num(event_type);

	unaff_events = MLX5_CAP_DEV_EVENT(dev,
					  user_unaffiliated_events);
	WARN_ON(event_type > MAX_SUPP_EVENT_NUM);

	mask_entry = event_type / 64;
	mask_bit = event_type % 64;

	if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
		return false;

	return true;
}

static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
{
	struct mlx5_eqe *eqe = data;
	u32 obj_id = 0;

	switch (event_type) {
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_CQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
		break;
	default:
		obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
		break;
	}

	return obj_id;
}

static int deliver_event(struct devx_event_subscription *event_sub,
			 const void *data)
{
	struct devx_async_event_file *ev_file;
	struct devx_async_event_data *event_data;
	unsigned long flags;

	ev_file = event_sub->ev_file;

	if (ev_file->omit_data) {
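		/* Only the subscription cookie is reported to the reader, so
		 * queue the subscription itself at most once.
		 */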
		spin_lock_irqsave(&ev_file->lock, flags);
		if (!list_empty(&event_sub->event_list)) {
			spin_unlock_irqrestore(&ev_file->lock, flags);
			return 0;
		}

		list_add_tail(&event_sub->event_list, &ev_file->event_list);
		spin_unlock_irqrestore(&ev_file->lock, flags);
		wake_up_interruptible(&ev_file->poll_wait);
		return 0;
	}

	event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
			     GFP_ATOMIC);
	if (!event_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		ev_file->is_overflow_err = 1;
		spin_unlock_irqrestore(&ev_file->lock, flags);
		return -ENOMEM;
	}

	event_data->hdr.cookie = event_sub->cookie;
	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));

	spin_lock_irqsave(&ev_file->lock, flags);
	list_add_tail(&event_data->list, &ev_file->event_list);
	spin_unlock_irqrestore(&ev_file->lock, flags);
	wake_up_interruptible(&ev_file->poll_wait);

	return 0;
}

static void dispatch_event_fd(struct list_head *fd_list,
			      const void *data)
{
	struct devx_event_subscription *item;

	list_for_each_entry_rcu(item, fd_list, xa_list) {
		if (!get_file_rcu(item->filp))
			continue;

		if (item->eventfd) {
			eventfd_signal(item->eventfd, 1);
			fput(item->filp);
			continue;
		}

		deliver_event(item, data);
		fput(item->filp);
	}
}

static int devx_event_notifier(struct notifier_block *nb,
			       unsigned long event_type, void *data)
{
	struct mlx5_devx_event_table *table;
	struct mlx5_ib_dev *dev;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u16 obj_type = 0;
	bool is_unaffiliated;
	u32 obj_id;

	/* Explicit filtering to kernel events which may occur frequently */
	if (event_type == MLX5_EVENT_TYPE_CMD ||
	    event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
		return NOTIFY_OK;

	table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
	dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
	is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);

	if (!is_unaffiliated)
		obj_type = get_event_obj_type(event_type, data);

	rcu_read_lock();
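	/* Level-1 key: event type in the low 16 bits, object type (for
	 * affiliated events) in the upper bits.
	 */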
	event = xa_load(&table->event_xa, event_type | (obj_type << 16));
	if (!event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	if (is_unaffiliated) {
		dispatch_event_fd(&event->unaffiliated_list, data);
		rcu_read_unlock();
		return NOTIFY_OK;
	}

	obj_id = devx_get_obj_id_from_event(event_type, data);
	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	dispatch_event_fd(&obj_event->obj_sub_list, data);

	rcu_read_unlock();
	return NOTIFY_OK;
}

void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;

	xa_init(&table->event_xa);
	mutex_init(&table->event_xa_lock);
	MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
	mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
}

void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	struct devx_event_subscription *sub, *tmp;
	struct devx_event *event;
	void *entry;
	unsigned long id;

	mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
	mutex_lock(&dev->devx_event_table.event_xa_lock);
	xa_for_each(&table->event_xa, id, entry) {
		event = entry;
		list_for_each_entry_safe(sub, tmp, &event->unaffiliated_list,
					 xa_list)
			devx_cleanup_subscription(dev, sub);
		kfree(entry);
	}
	mutex_unlock(&dev->devx_event_table.event_xa_lock);
	xa_destroy(&table->event_xa);
}

static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *event;
	int ret = 0;
	size_t eventsz;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(
			    ev_queue->poll_wait,
			    (!list_empty(&ev_queue->event_list) ||
			     ev_queue->is_destroyed))) {
			return -ERESTARTSYS;
		}

		if (list_empty(&ev_queue->event_list) &&
		    ev_queue->is_destroyed)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next,
			   struct devx_async_data, list);
	eventsz = event->cmd_out_len +
			sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);

	if (eventsz > count) {
		spin_unlock_irq(&ev_queue->lock);
		return -ENOSPC;
	}

	list_del(ev_queue->event_list.next);
	spin_unlock_irq(&ev_queue->lock);

	if (copy_to_user(buf, &event->hdr, eventsz))
		ret = -EFAULT;
	else
		ret = eventsz;

	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
	kvfree(event);
	return ret;
}

static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uobject *uobj = filp->private_data;
	struct devx_async_cmd_event_file *comp_ev_file = container_of(
		uobj, struct devx_async_cmd_event_file, uobj);
	struct devx_async_data *entry, *tmp;

	spin_lock_irq(&comp_ev_file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp,
				 &comp_ev_file->ev_queue.event_list, list)
		kvfree(entry);
	spin_unlock_irq(&comp_ev_file->ev_queue.lock);

	uverbs_close_fd(filp);
	return 0;
}

static __poll_t devx_async_cmd_event_poll(struct file *filp,
					      struct poll_table_struct *wait)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (ev_queue->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}

static const struct file_operations devx_async_cmd_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_cmd_event_read,
	.poll    = devx_async_cmd_event_poll,
	.release = devx_async_cmd_event_close,
	.llseek	 = no_llseek,
};

static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
				     size_t count, loff_t *pos)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub;
	struct devx_async_event_data *uninitialized_var(event);
	int ret = 0;
	size_t eventsz;
	bool omit_data;
	void *event_data;

	omit_data = ev_file->omit_data;

	spin_lock_irq(&ev_file->lock);

	if (ev_file->is_overflow_err) {
		ev_file->is_overflow_err = 0;
		spin_unlock_irq(&ev_file->lock);
		return -EOVERFLOW;
	}

	if (ev_file->is_destroyed) {
		spin_unlock_irq(&ev_file->lock);
		return -EIO;
	}

	while (list_empty(&ev_file->event_list)) {
		spin_unlock_irq(&ev_file->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_file->poll_wait,
			    (!list_empty(&ev_file->event_list) ||
			     ev_file->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_file->lock);
		if (ev_file->is_destroyed) {
			spin_unlock_irq(&ev_file->lock);
			return -EIO;
		}
	}

	if (omit_data) {
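		/* OMIT_DATA channels queue the subscription itself and report
		 * only its cookie.
		 */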
		event_sub = list_first_entry(&ev_file->event_list,
					struct devx_event_subscription,
					event_list);
		eventsz = sizeof(event_sub->cookie);
		event_data = &event_sub->cookie;
	} else {
		event = list_first_entry(&ev_file->event_list,
				      struct devx_async_event_data, list);
		eventsz = sizeof(struct mlx5_eqe) +
			sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
		event_data = &event->hdr;
	}

	if (eventsz > count) {
		spin_unlock_irq(&ev_file->lock);
		return -EINVAL;
	}

	if (omit_data)
		list_del_init(&event_sub->event_list);
	else
		list_del(&event->list);

	spin_unlock_irq(&ev_file->lock);

	if (copy_to_user(buf, event_data, eventsz))
		/* This points to an application issue, not a kernel concern */
		ret = -EFAULT;
	else
		ret = eventsz;

	if (!omit_data)
		kfree(event);
	return ret;
}

static __poll_t devx_async_event_poll(struct file *filp,
				      struct poll_table_struct *wait)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_file->poll_wait, wait);

	spin_lock_irq(&ev_file->lock);
	if (ev_file->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_file->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_file->lock);

	return pollflags;
}

static int devx_async_event_close(struct inode *inode, struct file *filp)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub, *event_sub_tmp;
	struct devx_async_event_data *entry, *tmp;
	struct mlx5_ib_dev *dev = ev_file->dev;

	mutex_lock(&dev->devx_event_table.event_xa_lock);
	/* delete the subscriptions which are related to this FD */
	list_for_each_entry_safe(event_sub, event_sub_tmp,
				 &ev_file->subscribed_events_list, file_list) {
		devx_cleanup_subscription(dev, event_sub);
		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);

		list_del_rcu(&event_sub->file_list);
		/* subscription may not be used by the read API any more */
		kfree_rcu(event_sub, rcu);
	}

	mutex_unlock(&dev->devx_event_table.event_xa_lock);

	/* free the pending events allocation */
	if (!ev_file->omit_data) {
		spin_lock_irq(&ev_file->lock);
		list_for_each_entry_safe(entry, tmp,
					 &ev_file->event_list, list)
			kfree(entry); /* read can't come any more */
		spin_unlock_irq(&ev_file->lock);
	}

	uverbs_close_fd(filp);
	put_device(&dev->ib_dev.dev);
	return 0;
}

static const struct file_operations devx_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_event_read,
	.poll    = devx_async_event_poll,
	.release = devx_async_event_close,
	.llseek	 = no_llseek,
};

static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
						   enum rdma_remove_reason why)
{
	struct devx_async_cmd_event_file *comp_ev_file =
		container_of(uobj, struct devx_async_cmd_event_file,
			     uobj);
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;

	spin_lock_irq(&ev_queue->lock);
	ev_queue->is_destroyed = 1;
	spin_unlock_irq(&ev_queue->lock);

	if (why == RDMA_REMOVE_DRIVER_REMOVE)
		wake_up_interruptible(&ev_queue->poll_wait);

	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
	return 0;
};

static int devx_hot_unplug_async_event_file(struct ib_uobject *uobj,
					    enum rdma_remove_reason why)
{
	struct devx_async_event_file *ev_file =
		container_of(uobj, struct devx_async_event_file,
			     uobj);

	spin_lock_irq(&ev_file->lock);
	ev_file->is_destroyed = 1;
	spin_unlock_irq(&ev_file->lock);

	wake_up_interruptible(&ev_file->poll_wait);
	return 0;
};

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			     enum ib_access_flags),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_WRITE,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
		u16, UA_MANDATORY),
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UVERBS_ACCESS_READ,
		UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
		UVERBS_ATTR_TYPE(u64),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UVERBS_ACCESS_READ,
		UA_MANDATORY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
		MLX5_IB_OBJECT_DEVX_OBJ,
		UVERBS_ACCESS_READ,
		UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
		UVERBS_ATTR_TYPE(u64),
		UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
		UVERBS_ATTR_TYPE(u32),
		UA_OPTIONAL));

DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
			MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
			     devx_hot_unplug_async_cmd_event_file,
			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
			MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
			enum mlx5_ib_uapi_devx_create_event_channel_flags,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
			     devx_hot_unplug_async_event_file,
			     &devx_async_event_fops, "[devx_async_event]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));

static bool devx_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
}

const struct uapi_definition mlx5_ib_devx_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_OBJ,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_UMEM,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	{},
};