/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

/*
 * Copy a response to userspace. If the provided 'resp' is larger than the
 * user buffer it is silently truncated. If the user provided a larger buffer
 * then the trailing portion is zero filled.
 *
 * These semantics are intended to support future extension of the output
 * structures.
 */
static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp,
			   size_t resp_len)
{
	int ret;

	if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
		return uverbs_copy_to_struct_or_zero(
			attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len);

	if (copy_to_user(attrs->ucore.outbuf, resp,
			 min(attrs->ucore.outlen, resp_len)))
		return -EFAULT;

	if (resp_len < attrs->ucore.outlen) {
		/*
		 * Zero fill any extra memory that user
		 * space might have provided.
		 */
		ret = clear_user(attrs->ucore.outbuf + resp_len,
				 attrs->ucore.outlen - resp_len);
		if (ret)
			return -EFAULT;
	}

	return 0;
}
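
/*
 * Usage sketch (hypothetical handler; the 'foo' command and its fields
 * are illustrative only, not a real uverbs command):
 *
 *	struct ib_uverbs_foo_resp resp = {};
 *
 *	resp.foo_handle = uobj->id;
 *	return uverbs_response(attrs, &resp, sizeof(resp));
 *
 * An old userspace with a short output buffer gets a truncated struct;
 * a newer userspace with a larger buffer sees the tail zero filled.
 */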

/*
 * Copy a request from userspace. If the provided 'req' is larger than the
 * user buffer then the user buffer is zero extended into the 'req'. If 'req'
 * is smaller than the user buffer then the uncopied bytes in the user buffer
 * must be zero.
 */
static int uverbs_request(struct uverbs_attr_bundle *attrs, void *req,
			  size_t req_len)
{
	if (copy_from_user(req, attrs->ucore.inbuf,
			   min(attrs->ucore.inlen, req_len)))
		return -EFAULT;

	if (attrs->ucore.inlen < req_len) {
		memset(req + attrs->ucore.inlen, 0,
		       req_len - attrs->ucore.inlen);
	} else if (attrs->ucore.inlen > req_len) {
		if (!ib_is_buffer_cleared(attrs->ucore.inbuf + req_len,
					  attrs->ucore.inlen - req_len))
			return -EOPNOTSUPP;
	}
	return 0;
}
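
/*
 * Worked example of the semantics above: with a 16 byte kernel 'req'
 * and a 24 byte user buffer, the copy succeeds only if the trailing
 * 8 user bytes are zero (-EOPNOTSUPP otherwise); with a 12 byte user
 * buffer, the last 4 bytes of 'req' are zero extended.
 */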

/*
 * Generate the value for the 'response_length' protocol used by write_ex.
 * This is the number of bytes the kernel actually wrote. Userspace can use
 * this to detect what structure members in the response the kernel
 * understood.
 */
static u32 uverbs_response_length(struct uverbs_attr_bundle *attrs,
				  size_t resp_len)
{
	return min_t(size_t, attrs->ucore.outlen, resp_len);
}
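
/*
 * For example, if the kernel's response struct is 32 bytes but userspace
 * only supplied a 24 byte output buffer, response_length reports 24 and
 * userspace can tell that the final members were not written.
 */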

/*
 * The iterator version of the request interface is for handlers that need to
 * step over a flex array at the end of a command header.
 */
struct uverbs_req_iter {
	const void __user *cur;
	const void __user *end;
};

static int uverbs_request_start(struct uverbs_attr_bundle *attrs,
				struct uverbs_req_iter *iter,
				void *req,
				size_t req_len)
{
	if (attrs->ucore.inlen < req_len)
		return -ENOSPC;

	if (copy_from_user(req, attrs->ucore.inbuf, req_len))
		return -EFAULT;

	iter->cur = attrs->ucore.inbuf + req_len;
	iter->end = attrs->ucore.inbuf + attrs->ucore.inlen;
	return 0;
}

static int uverbs_request_next(struct uverbs_req_iter *iter, void *val,
			       size_t len)
{
	if (iter->cur + len > iter->end)
		return -ENOSPC;

	if (copy_from_user(val, iter->cur, len))
		return -EFAULT;

	iter->cur += len;
	return 0;
}

static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
						  size_t len)
{
	const void __user *res = iter->cur;

	if (iter->cur + len > iter->end)
		return (void __force __user *)ERR_PTR(-ENOSPC);
	iter->cur += len;
	return res;
}

static int uverbs_request_finish(struct uverbs_req_iter *iter)
{
	if (!ib_is_buffer_cleared(iter->cur, iter->end - iter->cur))
		return -EOPNOTSUPP;
	return 0;
}
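
/*
 * Iterator usage sketch for a hypothetical command carrying a trailing
 * flex array (the 'foo'/'bar' names are illustrative only):
 *
 *	struct ib_uverbs_foo_cmd cmd;
 *	struct ib_uverbs_bar_elem elem;
 *	struct uverbs_req_iter iter;
 *	u32 i;
 *	int ret;
 *
 *	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < cmd.num_elems; i++) {
 *		ret = uverbs_request_next(&iter, &elem, sizeof(elem));
 *		if (ret)
 *			return ret;
 *	}
 *	return uverbs_request_finish(&iter);
 */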

/*
 * When calling a destroy function during an error unwind we need to pass in
 * the udata that is sanitized of all user arguments. I.e. from the driver's
 * perspective it looks like no udata was passed.
 */
struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs)
{
	attrs->driver_udata = (struct ib_udata){};
	return &attrs->driver_udata;
}
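
/*
 * Typical use on an error unwind, as in the handlers below:
 *
 *	ib_dealloc_pd_user(pd, uverbs_get_cleared_udata(attrs));
 */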

static struct ib_uverbs_completion_event_file *
_ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = ufd_get_read(UVERBS_OBJECT_COMP_CHANNEL,
					       fd, attrs);

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	return container_of(uobj, struct ib_uverbs_completion_event_file,
			    uobj);
}
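
/*
 * typecheck() evaluates to 1, so multiplying by it leaves the fd value
 * unchanged while forcing a compile-time warning if a caller passes
 * anything other than an s32.
 */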
#define ib_uverbs_lookup_comp_file(_fd, _ufile)                                \
	_ib_uverbs_lookup_comp_file((_fd)*typecheck(s32, _fd), _ufile)

int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *ufile = attrs->ufile;
	struct ib_ucontext *ucontext;
	struct ib_device *ib_dev;

	ib_dev = srcu_dereference(ufile->device->ib_dev,
				  &ufile->device->disassociate_srcu);
	if (!ib_dev)
		return -EIO;

	ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
	if (!ucontext)
		return -ENOMEM;

	ucontext->res.type = RDMA_RESTRACK_CTX;
	ucontext->device = ib_dev;
	ucontext->ufile = ufile;
	xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);
	attrs->context = ucontext;
	return 0;
}

int ib_init_ucontext(struct uverbs_attr_bundle *attrs)
{
	struct ib_ucontext *ucontext = attrs->context;
	struct ib_uverbs_file *file = attrs->ufile;
	int ret;

	if (!down_read_trylock(&file->hw_destroy_rwsem))
		return -EIO;
	mutex_lock(&file->ucontext_lock);
	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	ret = ib_rdmacg_try_charge(&ucontext->cg_obj, ucontext->device,
				   RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ret = ucontext->device->ops.alloc_ucontext(ucontext,
						   &attrs->driver_udata);
	if (ret)
		goto err_uncharge;

	rdma_restrack_uadd(&ucontext->res);

	/*
	 * Make sure that ib_uverbs_get_ucontext() sees the pointer update
	 * only after all writes to setup the ucontext have completed
	 */
	smp_store_release(&file->ucontext, ucontext);

	mutex_unlock(&file->ucontext_lock);
	up_read(&file->hw_destroy_rwsem);
	return 0;

err_uncharge:
	ib_rdmacg_uncharge(&ucontext->cg_obj, ucontext->device,
			   RDMACG_RESOURCE_HCA_HANDLE);
err:
	mutex_unlock(&file->ucontext_lock);
	up_read(&file->hw_destroy_rwsem);
	return ret;
}

static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_get_context_resp resp;
	struct ib_uverbs_get_context cmd;
	struct ib_device *ib_dev;
	struct ib_uobject *uobj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	ret = ib_alloc_ucontext(attrs);
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_ASYNC_EVENT, attrs, &ib_dev);
	if (IS_ERR(uobj)) {
		ret = PTR_ERR(uobj);
		goto err_ucontext;
	}

	resp = (struct ib_uverbs_get_context_resp){
		.num_comp_vectors = attrs->ufile->device->num_comp_vectors,
		.async_fd = uobj->id,
	};
	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_uobj;

	ret = ib_init_ucontext(attrs);
	if (ret)
		goto err_uobj;

	ib_uverbs_init_async_event_file(
		container_of(uobj, struct ib_uverbs_async_event_file, uobj));
	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;

err_uobj:
	rdma_alloc_abort_uobject(uobj, attrs, false);
err_ucontext:
	kfree(attrs->context);
	attrs->context = NULL;
	return ret;
}

static void copy_query_dev_fields(struct ib_ucontext *ucontext,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	struct ib_device *ib_dev = ucontext->device;

	resp->fw_ver		= attr->fw_ver;
	resp->node_guid		= ib_dev->node_guid;
	resp->sys_image_guid	= attr->sys_image_guid;
	resp->max_mr_size	= attr->max_mr_size;
	resp->page_size_cap	= attr->page_size_cap;
	resp->vendor_id		= attr->vendor_id;
	resp->vendor_part_id	= attr->vendor_part_id;
	resp->hw_ver		= attr->hw_ver;
	resp->max_qp		= attr->max_qp;
	resp->max_qp_wr		= attr->max_qp_wr;
	resp->device_cap_flags	= lower_32_bits(attr->device_cap_flags);
	resp->max_sge		= min(attr->max_send_sge, attr->max_recv_sge);
	resp->max_sge_rd	= attr->max_sge_rd;
	resp->max_cq		= attr->max_cq;
	resp->max_cqe		= attr->max_cqe;
	resp->max_mr		= attr->max_mr;
	resp->max_pd		= attr->max_pd;
	resp->max_qp_rd_atom	= attr->max_qp_rd_atom;
	resp->max_ee_rd_atom	= attr->max_ee_rd_atom;
	resp->max_res_rd_atom	= attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom	= attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom	= attr->max_ee_init_rd_atom;
	resp->atomic_cap		= attr->atomic_cap;
	resp->max_ee			= attr->max_ee;
	resp->max_rdd			= attr->max_rdd;
	resp->max_mw			= attr->max_mw;
	resp->max_raw_ipv6_qp		= attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp		= attr->max_raw_ethy_qp;
	resp->max_mcast_grp		= attr->max_mcast_grp;
	resp->max_mcast_qp_attach	= attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach	= attr->max_total_mcast_qp_attach;
	resp->max_ah			= attr->max_ah;
	resp->max_fmr			= attr->max_fmr;
	resp->max_map_per_fmr		= attr->max_map_per_fmr;
	resp->max_srq			= attr->max_srq;
	resp->max_srq_wr		= attr->max_srq_wr;
	resp->max_srq_sge		= attr->max_srq_sge;
	resp->max_pkeys			= attr->max_pkeys;
	resp->local_ca_ack_delay	= attr->local_ca_ack_delay;
	resp->phys_port_cnt		= ib_dev->phys_port_cnt;
}

static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_ucontext *ucontext;
	int ret;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(ucontext, &resp, &ucontext->device->attrs);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_query_port(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;
	struct ib_ucontext *ucontext;
	struct ib_device *ib_dev;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);
	ib_dev = ucontext->device;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);
	copy_port_attr_to_resp(&attr, &resp, ib_dev, cmd.port_num);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_PD, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
	if (!pd) {
		ret = -ENOMEM;
		goto err;
	}

	pd->device  = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->res.type = RDMA_RESTRACK_PD;

	ret = ib_dev->ops.alloc_pd(pd, &attrs->driver_udata);
	if (ret)
		goto err_alloc;

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;
	rdma_restrack_uadd(&pd->res);

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;

err_copy:
	ib_dealloc_pd_user(pd, uverbs_get_cleared_udata(attrs));
	pd = NULL;
err_alloc:
	kfree(pd);
err:
	uobj_alloc_abort(uobj, attrs);
	return ret;
}

static int ib_uverbs_dealloc_pd(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_dealloc_pd cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
}

struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};
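
/*
 * XRCDs are shared by backing inode: the rbtree maps an inode to the
 * single ib_xrcd created for it, igrab()/iput() pin the inode while the
 * entry exists, and xrcd->usecnt counts the handles referencing it.
 */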

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			    struct inode *inode,
			    struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_device *ibudev = attrs->ufile->device;
	struct ib_uverbs_open_xrcd	cmd;
	struct ib_uverbs_open_xrcd_resp	resp;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd			f = {NULL, 0};
	struct inode                   *inode = NULL;
	int				ret = 0;
	int				new_xrcd = 0;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	mutex_lock(&ibudev->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(ibudev, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no xrcd for this inode yet; O_CREAT is required */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, attrs,
						   &ib_dev);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->ops.alloc_xrcd(ib_dev, &attrs->driver_udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode   = inode;
		xrcd->device  = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(ibudev, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	if (f.file)
		fdput(f);

	mutex_unlock(&ibudev->xrcd_tree_mutex);

	rdma_alloc_commit_uobject(&obj->uobject, attrs);
	return 0;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(ibudev, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd, uverbs_get_cleared_udata(attrs));

err:
	uobj_alloc_abort(&obj->uobject, attrs);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&ibudev->xrcd_tree_mutex);

	return ret;
}

static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_close_xrcd cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs);
}

int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why,
			   struct uverbs_attr_bundle *attrs)
{
	struct inode *inode;
	int ret;
	struct ib_uverbs_device *dev = attrs->ufile->device;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd, &attrs->driver_udata);

	if (ib_is_destroy_retryable(ret, why, uobject)) {
		atomic_inc(&xrcd->usecnt);
		return ret;
	}

	if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}

static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->ops.reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
					 cmd.access_flags,
					 &attrs->driver_udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->type    = IB_MR_TYPE_USER;
	mr->dm	    = NULL;
	mr->sig_attrs = NULL;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	mr->res.type = RDMA_RESTRACK_MR;
	rdma_restrack_uadd(&mr->res);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	uobj_put_obj_read(pd);

	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;

err_copy:
	ib_dereg_mr_user(mr, uverbs_get_cleared_udata(attrs));

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj, attrs);
	return ret;
}

static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_rereg_mr      cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_pd                *pd = NULL;
	struct ib_mr                *mr;
	struct ib_pd		    *old_pd;
	int                          ret;
	struct ib_uobject	    *uobj;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (mr->dm) {
		ret = -EINVAL;
		goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
				       attrs);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->ops.rereg_user_mr(mr, cmd.flags, cmd.start,
					    cmd.length, cmd.hca_va,
					    cmd.access_flags, pd,
					    &attrs->driver_udata);
	if (ret)
		goto put_uobj_pd;

	if (cmd.flags & IB_MR_REREG_PD) {
		atomic_inc(&pd->usecnt);
		mr->pd = pd;
		atomic_dec(&old_pd->usecnt);
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;

	ret = uverbs_response(attrs, &resp, sizeof(resp));

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}

static int ib_uverbs_dereg_mr(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_dereg_mr cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
}

static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	int                            ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_MW, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.mw_type != IB_MW_TYPE_1 && cmd.mw_type != IB_MW_TYPE_2) {
		ret = -EINVAL;
		goto err_put;
	}

	mw = pd->device->ops.alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	uobj_put_obj_read(pd);
	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj, attrs);
	return ret;
}

static int ib_uverbs_dealloc_mw(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_dealloc_mw cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_MW, cmd.mw_handle, attrs);
}

static int ib_uverbs_create_comp_channel(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_comp_channel	   cmd;
	struct ib_uverbs_create_comp_channel_resp  resp;
	struct ib_uobject			  *uobj;
	struct ib_uverbs_completion_event_file	  *ev_file;
	struct ib_device *ib_dev;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret) {
		uobj_alloc_abort(uobj, attrs);
		return ret;
	}

	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;
}

static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
				       struct ib_uverbs_ex_create_cq *cmd)
{
	struct ib_ucq_object           *obj;
	struct ib_uverbs_completion_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};
	struct ib_device *ib_dev;

	if (cmd->comp_vector >= attrs->ufile->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, attrs);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uevent.uobject.user_handle = cmd->user_handle;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->uevent.event_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;
	attr.flags = cmd->flags;

	cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
	if (!cq) {
		ret = -ENOMEM;
		goto err_file;
	}
	cq->device        = ib_dev;
	cq->uobject       = obj;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata);
	if (ret)
		goto err_free;

	obj->uevent.uobject.object = cq;
	obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
	if (obj->uevent.event_file)
		uverbs_uobject_get(&obj->uevent.event_file->uobj);

	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uevent.uobject.id;
	resp.base.cqe       = cq->cqe;
	resp.response_length = uverbs_response_length(attrs, sizeof(resp));

	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_uadd(&cq->res);

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_cb;

	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
	return obj;

err_cb:
	if (obj->uevent.event_file)
		uverbs_uobject_put(&obj->uevent.event_file->uobj);
	ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
	cq = NULL;
err_free:
	kfree(cq);
err_file:
	if (ev_file)
		ib_uverbs_release_ucq(ev_file, obj);

err:
	uobj_alloc_abort(&obj->uevent.uobject, attrs);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_ex_create_cq	cmd_ex;
	struct ib_ucq_object           *obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(attrs, &cmd_ex);
	return PTR_ERR_OR_ZERO(obj);
}

static int ib_uverbs_ex_create_cq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_create_cq  cmd;
	struct ib_ucq_object           *obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	obj = create_cq(attrs, &cmd);
	return PTR_ERR_OR_ZERO(obj);
}

static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_resize_cq	cmd;
	struct ib_uverbs_resize_cq_resp	resp = {};
	struct ib_cq			*cq;
	int				ret = -EINVAL;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq)
		return -EINVAL;

	ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
out:
	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);

	return ret;
}

static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
			   struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id		= wc->wr_id;
	tmp.status		= wc->status;
	tmp.opcode		= wc->opcode;
	tmp.vendor_err		= wc->vendor_err;
	tmp.byte_len		= wc->byte_len;
	tmp.ex.imm_data		= wc->ex.imm_data;
	tmp.qp_num		= wc->qp->qp_num;
	tmp.src_qp		= wc->src_qp;
	tmp.wc_flags		= wc->wc_flags;
	tmp.pkey_index		= wc->pkey_index;
	if (rdma_cap_opa_ah(ib_dev, wc->port_num))
		tmp.slid	= OPA_TO_IB_UCAST_LID(wc->slid);
	else
		tmp.slid	= ib_lid_cpu16(wc->slid);
	tmp.sl			= wc->sl;
	tmp.dlid_path_bits	= wc->dlid_path_bits;
	tmp.port_num		= wc->port_num;
	tmp.reserved		= 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp  resp;
	u8 __user                     *header_ptr;
	u8 __user                     *data_ptr;
	struct ib_cq                  *cq;
	struct ib_wc                   wc;
	int                            ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = attrs->ucore.outbuf;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(cq->device, data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}
	ret = 0;

	if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
		ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);

out_put:
	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
	return ret;
}

static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
	return 0;
}

static int ib_uverbs_destroy_cq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject		*uobj;
	struct ib_ucq_object		*obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_ucq_object, uevent.uobject);
	memset(&resp, 0, sizeof(resp));
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->uevent.events_reported;

	uobj_put_destroy(uobj);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int create_qp(struct uverbs_attr_bundle *attrs,
		     struct ib_uverbs_ex_create_qp *cmd)
{
	struct ib_uqp_object		*obj;
	struct ib_device		*device;
	struct ib_pd			*pd = NULL;
	struct ib_xrcd			*xrcd = NULL;
	struct ib_uobject		*xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq			*scq = NULL, *rcq = NULL;
	struct ib_srq			*srq = NULL;
	struct ib_qp			*qp;
	struct ib_qp_init_attr		attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int				ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;
	struct ib_device *ib_dev;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table,
					    UVERBS_OBJECT_RWQ_IND_TBL,
					    cmd->rwq_ind_tbl_handle, attrs);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
					  attrs);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
							cmd->srq_handle, attrs);
				if (!srq || srq->srq_type == IB_SRQT_XRC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(
						cq, UVERBS_OBJECT_CQ,
						cmd->recv_cq_handle, attrs);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
						cmd->send_cq_handle, attrs);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
				       attrs);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd	   = xrcd;
	attr.sq_sig_type   = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					      IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd->qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd->max_send_wr;
	attr.cap.max_recv_wr     = cmd->max_recv_wr;
	attr.cap.max_send_sge    = cmd->max_send_sge;
	attr.cap.max_recv_sge    = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	attr.create_flags = cmd->create_flags;
	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				IB_QP_CREATE_CROSS_CHANNEL |
				IB_QP_CREATE_MANAGED_SEND |
				IB_QP_CREATE_MANAGED_RECV |
				IB_QP_CREATE_SCATTER_FCS |
				IB_QP_CREATE_CVLAN_STRIPPING |
				IB_QP_CREATE_SOURCE_QPN |
				IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			goto err_put;
		}

		attr.source_qpn = cmd->source_qpn;
	}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
				   obj);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		atomic_inc(&pd->usecnt);
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	} else {
		/* It is done in _ib_create_qp for other QP types */
		qp->uobject = obj;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
	if (obj->uevent.event_file)
		uverbs_uobject_get(&obj->uevent.event_file->uobj);

	memset(&resp, 0, sizeof resp);
	resp.base.qpn             = qp->qp_num;
	resp.base.qp_handle       = obj->uevent.uobject.id;
	resp.base.max_recv_sge    = attr.cap.max_recv_sge;
	resp.base.max_send_sge    = attr.cap.max_send_sge;
	resp.base.max_recv_wr     = attr.cap.max_recv_wr;
	resp.base.max_send_wr     = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;
	resp.response_length = uverbs_response_length(attrs, sizeof(resp));

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_uevent;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);
	if (rcq && rcq != scq)
		rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);
	if (srq)
		rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
	return 0;
err_uevent:
	if (obj->uevent.event_file)
		uverbs_uobject_put(&obj->uevent.event_file->uobj);
err_cb:
	ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);
	if (rcq && rcq != scq)
		rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);
	if (srq)
		rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject, attrs);
	return ret;
}

static int ib_uverbs_create_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_ex_create_qp	cmd_ex;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	return create_qp(attrs, &cmd_ex);
}

static int ib_uverbs_ex_create_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_create_qp cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	return create_qp(attrs, &cmd);
}

static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_uqp_object           *obj;
	struct ib_xrcd		       *xrcd;
	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr = {};
	int ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, attrs);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_destroy;

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = obj;
	uobj_put_read(xrcd_uobj);

	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
	return 0;

err_destroy:
	ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject, attrs);
	return ret;
}

static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
				   struct rdma_ah_attr *rdma_attr)
{
	const struct ib_global_route   *grh;

	uverb_attr->dlid              = rdma_ah_get_dlid(rdma_attr);
	uverb_attr->sl                = rdma_ah_get_sl(rdma_attr);
	uverb_attr->src_path_bits     = rdma_ah_get_path_bits(rdma_attr);
	uverb_attr->static_rate       = rdma_ah_get_static_rate(rdma_attr);
	uverb_attr->is_global         = !!(rdma_ah_get_ah_flags(rdma_attr) &
					 IB_AH_GRH);
	if (uverb_attr->is_global) {
		grh = rdma_ah_read_grh(rdma_attr);
		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
		uverb_attr->flow_label        = grh->flow_label;
		uverb_attr->sgid_index        = grh->sgid_index;
		uverb_attr->hop_limit         = grh->hop_limit;
		uverb_attr->traffic_class     = grh->traffic_class;
	}
	uverb_attr->port_num          = rdma_ah_get_port_num(rdma_attr);
}

static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                   *qp;
	struct ib_qp_attr              *attr;
	struct ib_qp_init_attr         *init_attr;
	int                            ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state               = attr->qp_state;
	resp.cur_qp_state           = attr->cur_qp_state;
	resp.path_mtu               = attr->path_mtu;
	resp.path_mig_state         = attr->path_mig_state;
	resp.qkey                   = attr->qkey;
	resp.rq_psn                 = attr->rq_psn;
	resp.sq_psn                 = attr->sq_psn;
	resp.dest_qp_num            = attr->dest_qp_num;
	resp.qp_access_flags        = attr->qp_access_flags;
	resp.pkey_index             = attr->pkey_index;
	resp.alt_pkey_index         = attr->alt_pkey_index;
	resp.sq_draining            = attr->sq_draining;
	resp.max_rd_atomic          = attr->max_rd_atomic;
	resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
	resp.min_rnr_timer          = attr->min_rnr_timer;
	resp.port_num               = attr->port_num;
	resp.timeout                = attr->timeout;
	resp.retry_cnt              = attr->retry_cnt;
	resp.rnr_retry              = attr->rnr_retry;
	resp.alt_port_num           = attr->alt_port_num;
	resp.alt_timeout            = attr->alt_timeout;

	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);

	resp.max_send_wr            = init_attr->cap.max_send_wr;
	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
	resp.max_send_sge           = init_attr->cap.max_send_sge;
	resp.max_recv_sge           = init_attr->cap.max_recv_sge;
	resp.max_inline_data        = init_attr->cap.max_inline_data;
	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	ret = uverbs_response(attrs, &resp, sizeof(resp));

out:
	kfree(attr);
	kfree(init_attr);

	return ret;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

static void copy_ah_attr_from_uverbs(struct ib_device *dev,
				     struct rdma_ah_attr *rdma_attr,
				     struct ib_uverbs_qp_dest *uverb_attr)
{
	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
	if (uverb_attr->is_global) {
		rdma_ah_set_grh(rdma_attr, NULL,
				uverb_attr->flow_label,
				uverb_attr->sgid_index,
				uverb_attr->hop_limit,
				uverb_attr->traffic_class);
		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
	} else {
		rdma_ah_set_ah_flags(rdma_attr, 0);
	}
	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
	rdma_ah_set_make_grd(rdma_attr, false);
}

1793
static int modify_qp(struct uverbs_attr_bundle *attrs,
1794
		     struct ib_uverbs_ex_modify_qp *cmd)
1795
{
1796 1797 1798
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	int ret;
1799

1800
	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
1801 1802 1803
	if (!attr)
		return -ENOMEM;

1804 1805
	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle,
			       attrs);
1806
	if (!qp) {
1807 1808 1809 1810
		ret = -EINVAL;
		goto out;
	}

1811 1812
	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
1813 1814 1815 1816
		ret = -EINVAL;
		goto release_qp;
	}

1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868
	if ((cmd->base.attr_mask & IB_QP_AV)) {
		if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
			ret = -EINVAL;
			goto release_qp;
		}

		if (cmd->base.attr_mask & IB_QP_STATE &&
		    cmd->base.qp_state == IB_QPS_RTR) {
		/* We are in INIT->RTR TRANSITION (if we are not,
		 * this transition will be rejected in subsequent checks).
		 * In the INIT->RTR transition, we cannot have IB_QP_PORT set,
		 * but the IB_QP_STATE flag is required.
		 *
		 * Since kernel 3.14 (commit dbf727de7440), the uverbs driver,
		 * when IB_QP_AV is set, has required inclusion of a valid
		 * port number in the primary AV. (AVs are created and handled
		 * differently for infiniband and ethernet (RoCE) ports).
		 *
		 * Check the port number included in the primary AV against
		 * the port number in the qp struct, which was set (and saved)
		 * in the RST->INIT transition.
		 */
			if (cmd->base.dest.port_num != qp->real_qp->port) {
				ret = -EINVAL;
				goto release_qp;
			}
		} else {
		/* We are in SQD->SQD. (If we are not, this transition will
		 * be rejected later in the verbs layer checks).
		 * Check for both IB_QP_PORT and IB_QP_AV, these can be set
		 * together in the SQD->SQD transition.
		 *
		 * If only IP_QP_AV was set, add in IB_QP_PORT as well (the
		 * verbs layer driver does not track primary port changes
		 * resulting from path migration. Thus, in SQD, if the primary
		 * AV is modified, the primary port should also be modified).
		 *
		 * Note that in this transition, the IB_QP_STATE flag
		 * is not allowed.
		 */
			if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
			     == (IB_QP_AV | IB_QP_PORT)) &&
			    cmd->base.port_num != cmd->base.dest.port_num) {
				ret = -EINVAL;
				goto release_qp;
			}
			if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
			    == IB_QP_AV) {
				cmd->base.attr_mask |= IB_QP_PORT;
				cmd->base.port_num = cmd->base.dest.port_num;
			}
		}
1869 1870
	}

1871
	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
1872
	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
1873 1874
	    !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
	    cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
1875 1876 1877 1878
		ret = -EINVAL;
		goto release_qp;
	}

1879 1880
	if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
	    cmd->base.cur_qp_state > IB_QPS_ERR) ||
1881 1882
	    (cmd->base.attr_mask & IB_QP_STATE &&
	    cmd->base.qp_state > IB_QPS_ERR)) {
1883 1884 1885 1886
		ret = -EINVAL;
		goto release_qp;
	}

1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929
	if (cmd->base.attr_mask & IB_QP_STATE)
		attr->qp_state = cmd->base.qp_state;
	if (cmd->base.attr_mask & IB_QP_CUR_STATE)
		attr->cur_qp_state = cmd->base.cur_qp_state;
	if (cmd->base.attr_mask & IB_QP_PATH_MTU)
		attr->path_mtu = cmd->base.path_mtu;
	if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
		attr->path_mig_state = cmd->base.path_mig_state;
	if (cmd->base.attr_mask & IB_QP_QKEY)
		attr->qkey = cmd->base.qkey;
	if (cmd->base.attr_mask & IB_QP_RQ_PSN)
		attr->rq_psn = cmd->base.rq_psn;
	if (cmd->base.attr_mask & IB_QP_SQ_PSN)
		attr->sq_psn = cmd->base.sq_psn;
	if (cmd->base.attr_mask & IB_QP_DEST_QPN)
		attr->dest_qp_num = cmd->base.dest_qp_num;
	if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
		attr->qp_access_flags = cmd->base.qp_access_flags;
	if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
		attr->pkey_index = cmd->base.pkey_index;
	if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		attr->max_rd_atomic = cmd->base.max_rd_atomic;
	if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
		attr->min_rnr_timer = cmd->base.min_rnr_timer;
	if (cmd->base.attr_mask & IB_QP_PORT)
		attr->port_num = cmd->base.port_num;
	if (cmd->base.attr_mask & IB_QP_TIMEOUT)
		attr->timeout = cmd->base.timeout;
	if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
		attr->retry_cnt = cmd->base.retry_cnt;
	if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
		attr->rnr_retry = cmd->base.rnr_retry;
	if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
		attr->alt_port_num = cmd->base.alt_port_num;
		attr->alt_timeout = cmd->base.alt_timeout;
		attr->alt_pkey_index = cmd->base.alt_pkey_index;
	}
	if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
		attr->rate_limit = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
					 &cmd->base.dest);

	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
					 &cmd->base.alt_dest);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      &attrs->driver_udata);

release_qp:
	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
out:
	kfree(attr);

	return ret;
}

static int ib_uverbs_modify_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_modify_qp cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd.base, sizeof(cmd.base));
	if (ret)
		return ret;

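	/*
	 * Reject attribute bits this legacy command does not understand.
	 * (IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1 sets every bit up to
	 * and including the last legacy attribute bit; e.g. if that last
	 * bit were 1 << 20 (illustrative only), the mask would cover bits
	 * 0..20 and any higher bit in attr_mask is unsupported here.
	 */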
	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	return modify_qp(attrs, &cmd);
}

static int ib_uverbs_ex_modify_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_modify_qp cmd;
	struct ib_uverbs_ex_modify_qp_resp resp = {
		.response_length = uverbs_response_length(attrs, sizeof(resp))
	};
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	ret = modify_qp(attrs, &cmd);
	if (ret)
		return ret;

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject		*uobj;
	struct ib_uqp_object        	*obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_get_destroy(UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	memset(&resp, 0, sizeof(resp));
	resp.events_reported = obj->uevent.events_reported;

	uobj_put_destroy(uobj);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
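	/*
	 * The allocation below is ALIGN(wr_size, sizeof(struct ib_sge)) +
	 * num_sge * sizeof(struct ib_sge) bytes. Refuse any num_sge large
	 * enough to make that sum wrap past U32_MAX.
	 */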
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
		       sizeof (struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
			 num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}

static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next;
	const struct ib_send_wr	       *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int				is_ud;
	int ret, ret2;
	size_t                          next_size;
	const struct ib_sge __user *sgls;
	const void __user *wqes;
	struct uverbs_req_iter iter;

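	/*
	 * Wire layout of a POST_SEND request: the fixed command header,
	 * then wr_count work requests of wqe_size bytes each, then
	 * sge_count scatter/gather entries. The iterator below maps the
	 * three regions in that order.
	 */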
	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (ret)
		return ret;
	wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
	if (IS_ERR(wqes))
		return PTR_ERR(wqes);
	sgls = uverbs_request_next_ptr(
		&iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
	if (IS_ERR(sgls))
		return PTR_ERR(sgls);
	ret = uverbs_request_finish(&iter);
	if (ret)
		return ret;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr, wqes + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
						   user_wr->wr.ud.ah, attrs);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

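		/*
		 * alloc_wr() reserved room for the scatter/gather list
		 * right behind the work request, aligned to
		 * sizeof(struct ib_sge), so sg_list points into the same
		 * allocation.
		 */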
		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list, sgls + sg_ind,
					   next->num_sge *
						   sizeof(struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->ops.post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	ret2 = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret2)
		ret = ret2;

out_put:
	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);

	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			uobj_put_obj_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret;
}

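/*
 * Unmarshal a chain of receive work requests from a user request. The wire
 * layout mirrors post_send: wr_count work requests of wqe_size bytes each,
 * followed by sge_count scatter/gather entries. Returns a NULL-terminated
 * chain that the caller must free element by element, or an ERR_PTR().
 */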
static struct ib_recv_wr *
ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
			  u32 wqe_size, u32 sge_count)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;
	const struct ib_sge __user *sgls;
	const void __user *wqes;

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
	if (IS_ERR(wqes))
		return ERR_CAST(wqes);
	sgls = uverbs_request_next_ptr(
		iter, sge_count * sizeof(struct ib_uverbs_sge));
	if (IS_ERR(sgls))
		return ERR_CAST(sgls);
	ret = uverbs_request_finish(iter);
	if (ret)
		return ERR_PTR(ret);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, wqes + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		if (user_wr->num_sge >=
		    (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
		    sizeof (struct ib_sge)) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list, sgls + sg_ind,
					   next->num_sge *
						   sizeof(struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next;
	const struct ib_recv_wr	       *bad_wr;
	struct ib_qp                   *qp;
	int ret, ret2;
	struct uverbs_req_iter iter;

	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
				       cmd.sge_count);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	resp.bad_wr = 0;
	ret = qp->device->ops.post_recv(qp->real_qp, wr, &bad_wr);

	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
	if (ret) {
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}
	}

	ret2 = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret2)
		ret = ret2;
out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret;
}

static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next;
	const struct ib_recv_wr		   *bad_wr;
	struct ib_srq                      *srq;
	int ret, ret2;
	struct uverbs_req_iter iter;

	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
				       cmd.sge_count);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
	if (!srq) {
		ret = -EINVAL;
		goto out;
	}

	resp.bad_wr = 0;
	ret = srq->device->ops.post_srq_recv(srq, wr, &bad_wr);

	rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	ret2 = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret2)
		ret = ret2;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret;
}

static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct rdma_ah_attr		attr = {};
	int ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_AH, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) {
		ret = -EINVAL;
		goto err;
	}

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
	rdma_ah_set_make_grd(&attr, false);
	rdma_ah_set_dlid(&attr, cmd.attr.dlid);
	rdma_ah_set_sl(&attr, cmd.attr.sl);
	rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
	rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
	rdma_ah_set_port_num(&attr, cmd.attr.port_num);

	if (cmd.attr.is_global) {
		rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
				cmd.attr.grh.sgid_index,
				cmd.attr.grh.hop_limit,
				cmd.attr.grh.traffic_class);
		rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr, 0);
	}

	ah = rdma_create_user_ah(pd, &attr, &attrs->driver_udata);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->user_handle = cmd.user_handle;
	uobj->object = ah;

	resp.ah_handle = uobj->id;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	uobj_put_obj_read(pd);
	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;

err_copy:
	rdma_destroy_ah_user(ah, RDMA_DESTROY_AH_SLEEPABLE,
			     uverbs_get_cleared_udata(attrs));

err_put:
	uobj_put_obj_read(pd);

err:
	uobj_alloc_abort(uobj, attrs);
	return ret;
}

static int ib_uverbs_destroy_ah(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_destroy_ah cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_AH, cmd.ah_handle, attrs);
}

static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp)
		return -EINVAL;

	obj = qp->uobject;

	mutex_lock(&obj->mcast_lock);
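	/*
	 * Attaching an already attached <gid, mlid> pair succeeds without
	 * calling the driver again; mcast_list mirrors what is currently
	 * attached so the attachments can be torn down later.
	 */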
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	mutex_unlock(&obj->mcast_lock);
	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);

	return ret;
}

static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;
	bool                          found = false;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp)
		return -EINVAL;

	obj = qp->uobject;
	mutex_lock(&obj->mcast_lock);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			found = true;
			break;
		}

	if (!found) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
	mutex_unlock(&obj->mcast_lock);
	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
	return ret;
}

struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
{
	struct ib_uflow_resources *resources;

	resources = kzalloc(sizeof(*resources), GFP_KERNEL);

	if (!resources)
		return NULL;

	if (!num_specs)
		goto out;

	resources->counters =
		kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
	resources->collection =
		kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);

	if (!resources->counters || !resources->collection)
		goto err;

out:
	resources->max = num_specs;
	return resources;

err:
	kfree(resources->counters);
	kfree(resources);

	return NULL;
}
EXPORT_SYMBOL(flow_resources_alloc);

void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
	unsigned int i;

	if (!uflow_res)
		return;

	for (i = 0; i < uflow_res->collection_num; i++)
		atomic_dec(&uflow_res->collection[i]->usecnt);

	for (i = 0; i < uflow_res->counters_num; i++)
		atomic_dec(&uflow_res->counters[i]->usecnt);

	kfree(uflow_res->collection);
	kfree(uflow_res->counters);
	kfree(uflow_res);
}
EXPORT_SYMBOL(ib_uverbs_flow_resources_free);

void flow_resources_add(struct ib_uflow_resources *uflow_res,
			enum ib_flow_spec_type type,
			void *ibobj)
{
	WARN_ON(uflow_res->num >= uflow_res->max);

	switch (type) {
	case IB_FLOW_SPEC_ACTION_HANDLE:
		atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt);
		uflow_res->collection[uflow_res->collection_num++] =
			(struct ib_flow_action *)ibobj;
		break;
	case IB_FLOW_SPEC_ACTION_COUNT:
		atomic_inc(&((struct ib_counters *)ibobj)->usecnt);
		uflow_res->counters[uflow_res->counters_num++] =
			(struct ib_counters *)ibobj;
		break;
	default:
		WARN_ON(1);
	}

	uflow_res->num++;
}
EXPORT_SYMBOL(flow_resources_add);

static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
				       struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec,
				       struct ib_uflow_resources *uflow_res)
{
	ib_spec->type = kern_spec->type;
	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ACTION_TAG:
		if (kern_spec->flow_tag.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_tag))
			return -EINVAL;

		ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
		ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (kern_spec->drop.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_drop))
			return -EINVAL;

		ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
		break;
	case IB_FLOW_SPEC_ACTION_HANDLE:
		if (kern_spec->action.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_handle))
			return -EOPNOTSUPP;
		ib_spec->action.act = uobj_get_obj_read(flow_action,
							UVERBS_OBJECT_FLOW_ACTION,
							kern_spec->action.handle,
							attrs);
		if (!ib_spec->action.act)
			return -EINVAL;
		ib_spec->action.size =
			sizeof(struct ib_flow_spec_action_handle);
		flow_resources_add(uflow_res,
				   IB_FLOW_SPEC_ACTION_HANDLE,
				   ib_spec->action.act);
		uobj_put_obj_read(ib_spec->action.act);
		break;
	case IB_FLOW_SPEC_ACTION_COUNT:
		if (kern_spec->flow_count.size !=
			sizeof(struct ib_uverbs_flow_spec_action_count))
			return -EINVAL;
		ib_spec->flow_count.counters =
			uobj_get_obj_read(counters,
					  UVERBS_OBJECT_COUNTERS,
					  kern_spec->flow_count.handle,
					  attrs);
		if (!ib_spec->flow_count.counters)
			return -EINVAL;
		ib_spec->flow_count.size =
				sizeof(struct ib_flow_spec_action_count);
		flow_resources_add(uflow_res,
				   IB_FLOW_SPEC_ACTION_COUNT,
				   ib_spec->flow_count.counters);
		uobj_put_obj_read(ib_spec->flow_count.counters);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * User space filter structures must be 64 bit aligned, otherwise this
	 * check may pass while additional new attributes go unhandled. If the
	 * user buffer is larger than the filter the kernel knows, the extra
	 * bytes must all be zero and only the known prefix is used.
	 */

	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv(kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}

int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
					  const void *kern_spec_mask,
					  const void *kern_spec_val,
					  size_t kern_filter_sz,
					  union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t ib_filter_sz;

	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	ib_spec->type = type;

	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
		return -EINVAL;

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_ESP:
		ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->esp.size = sizeof(struct ib_flow_spec_esp);
		memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_GRE:
		ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->gre.size = sizeof(struct ib_flow_spec_gre);
		memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_MPLS:
		ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls);
		memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	size_t kern_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	if (check_sub_overflow((size_t)kern_spec->hdr.size,
			       sizeof(struct ib_uverbs_flow_spec_hdr),
			       &kern_filter_sz))
		return -EINVAL;

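	/*
	 * Past the header, a flow spec carries the filter value and the
	 * filter mask back to back in equal halves, so halving the
	 * remaining size yields the size of each.
	 */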
	kern_filter_sz /= 2;

	kern_spec_val = (void *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = kern_spec_val + kern_filter_sz;

	return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type,
						     kern_spec_mask,
						     kern_spec_val,
						     kern_filter_sz, ib_spec);
}

static int kern_spec_to_ib_spec(struct uverbs_attr_bundle *attrs,
				struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec,
				struct ib_uflow_resources *uflow_res)
{
	if (kern_spec->reserved)
		return -EINVAL;

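	/*
	 * Spec types from IB_FLOW_SPEC_ACTION_TAG upward are actions;
	 * everything below that is a packet-matching filter.
	 */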
	if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
		return kern_spec_to_ib_spec_action(attrs, kern_spec, ib_spec,
						   uflow_res);
	else
		return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}

static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_create_wq cmd;
	struct ib_uverbs_ex_create_wq_resp resp = {};
	struct ib_uwq_object           *obj;
	int err = 0;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_wq *wq;
	struct ib_wq_init_attr wq_init_attr = {};
	struct ib_device *ib_dev;

	err = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		err = -EINVAL;
		goto err_uobj;
	}

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq) {
		err = -EINVAL;
		goto err_put_pd;
	}

	wq_init_attr.cq = cq;
	wq_init_attr.max_sge = cmd.max_sge;
	wq_init_attr.max_wr = cmd.max_wr;
	wq_init_attr.wq_type = cmd.wq_type;
	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
	wq_init_attr.create_flags = cmd.create_flags;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	obj->uevent.uobject.user_handle = cmd.user_handle;

	wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
		goto err_put_cq;
	}

	wq->uobject = obj;
	obj->uevent.uobject.object = wq;
	wq->wq_type = wq_init_attr.wq_type;
	wq->cq = cq;
	wq->pd = pd;
	wq->device = pd->device;
	atomic_set(&wq->usecnt, 0);
	atomic_inc(&pd->usecnt);
	atomic_inc(&cq->usecnt);
	obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
	if (obj->uevent.event_file)
		uverbs_uobject_get(&obj->uevent.event_file->uobj);

	memset(&resp, 0, sizeof(resp));
	resp.wq_handle = obj->uevent.uobject.id;
	resp.max_sge = wq_init_attr.max_sge;
	resp.max_wr = wq_init_attr.max_wr;
	resp.wqn = wq->wq_num;
	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
	err = uverbs_response(attrs, &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(pd);
	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
	return 0;

err_copy:
	if (obj->uevent.event_file)
		uverbs_uobject_put(&obj->uevent.event_file->uobj);
	ib_destroy_wq(wq, uverbs_get_cleared_udata(attrs));
err_put_cq:
	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
err_put_pd:
	uobj_put_obj_read(pd);
err_uobj:
	uobj_alloc_abort(&obj->uevent.uobject, attrs);

	return err;
}

static int ib_uverbs_ex_destroy_wq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_destroy_wq	cmd;
	struct ib_uverbs_ex_destroy_wq_resp	resp = {};
	struct ib_uobject		*uobj;
	struct ib_uwq_object		*obj;
	int				ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
	uobj = uobj_get_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	resp.events_reported = obj->uevent.events_reported;

	uobj_put_destroy(uobj);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_modify_wq cmd;
	struct ib_wq *wq;
	struct ib_wq_attr wq_attr = {};
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (!cmd.attr_mask)
		return -EINVAL;

	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
		return -EINVAL;

	wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
	if (!wq)
		return -EINVAL;

	wq_attr.curr_wq_state = cmd.curr_wq_state;
	wq_attr.wq_state = cmd.wq_state;
	if (cmd.attr_mask & IB_WQ_FLAGS) {
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
	ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
					&attrs->driver_udata);
	rdma_lookup_put_uobject(&wq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
	return ret;
}

static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_create_rwq_ind_table cmd;
	struct ib_uverbs_ex_create_rwq_ind_table_resp  resp = {};
	struct ib_uobject		  *uobj;
	int err;
	struct ib_rwq_ind_table_init_attr init_attr = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_wq	**wqs = NULL;
	u32 *wqs_handles = NULL;
	struct ib_wq	*wq = NULL;
	int i, j, num_read_wqs;
	u32 num_wq_handles;
	struct uverbs_req_iter iter;
	struct ib_device *ib_dev;

	err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

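	/*
	 * The table size arrives as a log2, so the table always holds a
	 * power-of-two number of WQ handles; e.g. log_ind_tbl_size == 3
	 * means eight handles follow the command.
	 */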
	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;

	err = uverbs_request_next(&iter, wqs_handles,
				  num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	err = uverbs_request_finish(&iter);
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto  err_free;
	}

	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
			num_read_wqs++) {
		wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
				       wqs_handles[num_read_wqs], attrs);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, attrs, &ib_dev);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto put_wqs;
	}

	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;

	rwq_ind_tbl = ib_dev->ops.create_rwq_ind_table(ib_dev, &init_attr,
						       &attrs->driver_udata);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = uverbs_response_length(attrs, sizeof(resp));

	err = uverbs_response(attrs, &resp, sizeof(resp));
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	for (j = 0; j < num_read_wqs; j++)
		rdma_lookup_put_uobject(&wqs[j]->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);

	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;

err_copy:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	uobj_alloc_abort(uobj, attrs);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		rdma_lookup_put_uobject(&wqs[j]->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}

static int ib_uverbs_ex_destroy_rwq_ind_table(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_destroy_rwq_ind_table cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	return uobj_perform_destroy(UVERBS_OBJECT_RWQ_IND_TBL,
				    cmd.ind_tbl_handle, attrs);
}

static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		  *uobj;
	struct ib_flow			  *flow_id;
	struct ib_uverbs_flow_attr	  *kern_flow_attr;
	struct ib_flow_attr		  *flow_attr;
	struct ib_qp			  *qp;
	struct ib_uflow_resources	  *uflow_res;
	struct ib_uverbs_flow_spec_hdr	  *kern_spec;
	struct uverbs_req_iter iter;
	int err;
	void *ib_spec;
	int i;
	struct ib_device *ib_dev;

	err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		*kern_flow_attr = cmd.flow_attr;
		err = uverbs_request_next(&iter, &kern_flow_attr->flow_specs,
					  cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	err = uverbs_request_finish(&iter);
	if (err)
		goto err_free_attr;

	uobj = uobj_alloc(UVERBS_OBJECT_FLOW, attrs, &ib_dev);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto err_free_attr;
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
		err = -EINVAL;
		goto err_put;
	}

	flow_attr = kzalloc(struct_size(flow_attr, flows,
				cmd.flow_attr.num_of_specs), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}
	uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs);
	if (!uflow_res) {
		err = -ENOMEM;
		goto err_free_flow_attr;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

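	/*
	 * Translate each user spec into its kernel form, appended after
	 * flow_attr. Both sides are variable sized: the loop consumes
	 * kern_spec->size input bytes per spec and accumulates the
	 * resulting ib_spec sizes into flow_attr->size; leftover input
	 * bytes are rejected below.
	 */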
	kern_spec = kern_flow_attr->flow_specs;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
			cmd.flow_attr.size >= sizeof(*kern_spec) &&
			cmd.flow_attr.size >= kern_spec->size;
	     i++) {
		err = kern_spec_to_ib_spec(
				attrs, (struct ib_uverbs_flow_spec *)kern_spec,
				ib_spec, uflow_res);
		if (err)
			goto err_free;

		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= kern_spec->size;
		kern_spec = ((void *)kern_spec) + kern_spec->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}

	flow_id = qp->device->ops.create_flow(
		qp, flow_attr, IB_FLOW_DOMAIN_USER, &attrs->driver_udata);

	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}

	ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = uverbs_response(attrs, &resp, sizeof(resp));
	if (err)
		goto err_copy;

	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;
err_copy:
	if (!qp->device->ops.destroy_flow(flow_id))
		atomic_dec(&qp->usecnt);
err_free:
	ib_uverbs_flow_resources_free(uflow_res);
err_free_flow_attr:
	kfree(flow_attr);
err_put:
	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
err_uobj:
	uobj_alloc_abort(uobj, attrs);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}

static int ib_uverbs_ex_destroy_flow(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_destroy_flow	cmd;
	int				ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	return uobj_perform_destroy(UVERBS_OBJECT_FLOW, cmd.flow_handle, attrs);
}

static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;
	struct ib_device *ib_dev;

	obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, attrs,
						  &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

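	/*
	 * Each SRQ flavour pulls in different resources: IB_SRQT_TM adds
	 * a tag matching limit, IB_SRQT_XRC requires an XRC domain, and
	 * any CQ-based type (per ib_srq_has_cq()) requires a CQ.
	 */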
	if (cmd->srq_type == IB_SRQT_TM)
		attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;

	if (cmd->srq_type == IB_SRQT_XRC) {
		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
					  attrs);
		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err;
		}

		attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);
	}

	if (ib_srq_has_cq(cmd->srq_type)) {
		attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
						cmd->cq_handle, attrs);
		if (!attr.ext.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	INIT_LIST_HEAD(&obj->uevent.event_list);
	obj->uevent.uobject.user_handle = cmd->user_handle;

	srq = ib_create_srq_user(pd, &attr, obj, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put_pd;
	}

	obj->uevent.uobject.object = srq;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
	if (obj->uevent.event_file)
		uverbs_uobject_get(&obj->uevent.event_file->uobj);

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	if (cmd->srq_type == IB_SRQT_XRC)
		uobj_put_read(xrcd_uobj);

	if (ib_srq_has_cq(cmd->srq_type))
		rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);

	uobj_put_obj_read(pd);
	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
	return 0;

err_copy:
	if (obj->uevent.event_file)
		uverbs_uobject_put(&obj->uevent.event_file->uobj);
	ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
err_put_pd:
	uobj_put_obj_read(pd);
err_put_cq:
	if (ib_srq_has_cq(cmd->srq_type))
		rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

err:
	uobj_alloc_abort(&obj->uevent.uobject, attrs);
	return ret;
}

static int ib_uverbs_create_srq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

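	/*
	 * The legacy create_srq command is a strict subset of create_xsrq,
	 * so build an extended command with IB_SRQT_BASIC and share one
	 * implementation.
	 */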
	memset(&xcmd, 0, sizeof(xcmd));
	xcmd.response	 = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type	 = IB_SRQT_BASIC;
	xcmd.pd_handle	 = cmd.pd_handle;
	xcmd.max_wr	 = cmd.max_wr;
	xcmd.max_sge	 = cmd.max_sge;
	xcmd.srq_limit	 = cmd.srq_limit;

	return __uverbs_create_xsrq(attrs, &xcmd, &attrs->driver_udata);
}

static int ib_uverbs_create_xsrq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_xsrq     cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return __uverbs_create_xsrq(attrs, &cmd, &attrs->driver_udata);
}

static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->ops.modify_srq(srq, &attr, cmd.attr_mask,
					  &attrs->driver_udata);

	rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);

	return ret;
}

static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                             ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_destroy_srq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_uevent_object        	 *obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_get_destroy(UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uevent_object, uobject);
	memset(&resp, 0, sizeof(resp));
	resp.events_reported = obj->events_reported;

	uobj_put_destroy(uobj);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_query_device_resp resp = {};
	struct ib_uverbs_ex_query_device  cmd;
	struct ib_device_attr attr = {0};
	struct ib_ucontext *ucontext;
	struct ib_device *ib_dev;
	int err;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);
	ib_dev = ucontext->device;

	err = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	err = ib_dev->ops.query_device(ib_dev, &attr, &attrs->driver_udata);
	if (err)
		return err;

	copy_query_dev_fields(ucontext, &resp.base, &attr);

	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
	resp.xrc_odp_caps = attr.odp_caps.per_transport_caps.xrc_odp_caps;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.hca_core_clock = attr.hca_core_clock;
	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;
	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.tm_caps.max_rndv_hdr_size	= attr.tm_caps.max_rndv_hdr_size;
	resp.tm_caps.max_num_tags	= attr.tm_caps.max_num_tags;
	resp.tm_caps.max_ops		= attr.tm_caps.max_ops;
	resp.tm_caps.max_sge		= attr.tm_caps.max_sge;
	resp.tm_caps.flags		= attr.tm_caps.flags;
	resp.cq_moderation_caps.max_cq_moderation_count  =
		attr.cq_caps.max_cq_moderation_count;
	resp.cq_moderation_caps.max_cq_moderation_period =
		attr.cq_caps.max_cq_moderation_period;
	resp.max_dm_size = attr.max_dm_size;
	resp.response_length = uverbs_response_length(attrs, sizeof(resp));

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_modify_cq cmd;
	struct ib_cq *cq;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (!cmd.attr_mask || cmd.reserved)
		return -EINVAL;

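	/* Only IB_CQ_MODERATE is understood; reject any newer mask bits. */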
	if (cmd.attr_mask > IB_CQ_MODERATE)
		return -EOPNOTSUPP;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq)
		return -EINVAL;

	ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);

	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
	return ret;
}

/*
 * Describe the input structs for write(). Some write methods have an
 * input-only struct; most have both an input and an output struct. If the
 * method has an output then the 'response' u64 must be the first field in
 * the request structure.
 *
 * If udata is present then both the request and response structs have a
 * trailing driver_data flex array. In this case the size of the base struct
 * cannot be changed.
 */
#define UAPI_DEF_WRITE_IO(req, resp)                                           \
	.write.has_resp = 1 +                                                  \
			  BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) +    \
			  BUILD_BUG_ON_ZERO(sizeof(((req *)0)->response) !=    \
					    sizeof(u64)),                      \
	.write.req_size = sizeof(req), .write.resp_size = sizeof(resp)

#define UAPI_DEF_WRITE_I(req) .write.req_size = sizeof(req)

#define UAPI_DEF_WRITE_UDATA_IO(req, resp)                                     \
	UAPI_DEF_WRITE_IO(req, resp),                                          \
		.write.has_udata =                                             \
			1 +                                                    \
			BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=        \
					  sizeof(req)) +                       \
			BUILD_BUG_ON_ZERO(offsetof(resp, driver_data) !=       \
					  sizeof(resp))

#define UAPI_DEF_WRITE_UDATA_I(req)                                            \
	UAPI_DEF_WRITE_I(req),                                                 \
		.write.has_udata =                                             \
			1 + BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=    \
					      sizeof(req))
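
/*
 * Layout sketch (hypothetical structs, not part of the real ABI): a method
 * declared with UAPI_DEF_WRITE_UDATA_IO(struct foo_cmd, struct foo_resp)
 * would need layouts like:
 *
 *	struct foo_cmd {
 *		__u64 response;		// must come first when has_resp is set
 *		__u32 handle;
 *		__u32 reserved;
 *		__u64 driver_data[];	// offsetof() must equal sizeof(base)
 *	};
 *
 *	struct foo_resp {
 *		__u32 value;
 *		__u32 reserved;
 *		__u64 driver_data[];
 *	};
 *
 * The BUILD_BUG_ON_ZERO() terms above turn any violation of these
 * invariants into a compile-time error.
 */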

/*
 * The _EX versions are for use with WRITE_EX and allow the last struct member
 * to be specified. Buffers that do not include that member will be rejected.
 */
#define UAPI_DEF_WRITE_IO_EX(req, req_last_member, resp, resp_last_member)     \
	.write.has_resp = 1,                                                   \
	.write.req_size = offsetofend(req, req_last_member),                   \
	.write.resp_size = offsetofend(resp, resp_last_member)

#define UAPI_DEF_WRITE_I_EX(req, req_last_member)                              \
	.write.req_size = offsetofend(req, req_last_member)
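
/*
 * Sketch (hypothetical command, for illustration): declaring
 *	UAPI_DEF_WRITE_I_EX(struct foo_cmd, comp_mask)
 * sets req_size to offsetofend(struct foo_cmd, comp_mask), so a write()
 * buffer is accepted only if it is long enough to contain comp_mask;
 * members added after comp_mask remain optional for older userspace.
 */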

const struct uapi_definition uverbs_def_write_intf[] = {
	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_AH,
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_AH,
				     ib_uverbs_create_ah,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_create_ah,
					     struct ib_uverbs_create_ah_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(create_ah)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DESTROY_AH,
			ib_uverbs_destroy_ah,
			UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_ah),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_ah))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_COMP_CHANNEL,
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL,
			ib_uverbs_create_comp_channel,
			UAPI_DEF_WRITE_IO(
				struct ib_uverbs_create_comp_channel,
				struct ib_uverbs_create_comp_channel_resp))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_CQ,
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_CQ,
				     ib_uverbs_create_cq,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_create_cq,
					     struct ib_uverbs_create_cq_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DESTROY_CQ,
			ib_uverbs_destroy_cq,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_cq,
					  struct ib_uverbs_destroy_cq_resp),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_cq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_POLL_CQ,
			ib_uverbs_poll_cq,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_poll_cq,
					  struct ib_uverbs_poll_cq_resp),
			UAPI_DEF_METHOD_NEEDS_FN(poll_cq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_REQ_NOTIFY_CQ,
			ib_uverbs_req_notify_cq,
			UAPI_DEF_WRITE_I(struct ib_uverbs_req_notify_cq),
			UAPI_DEF_METHOD_NEEDS_FN(req_notify_cq)),
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_RESIZE_CQ,
				     ib_uverbs_resize_cq,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_resize_cq,
					     struct ib_uverbs_resize_cq_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(resize_cq)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_CREATE_CQ,
			ib_uverbs_ex_create_cq,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_cq,
					     reserved,
					     struct ib_uverbs_ex_create_cq_resp,
					     response_length),
			UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_MODIFY_CQ,
			ib_uverbs_ex_modify_cq,
			UAPI_DEF_WRITE_I(struct ib_uverbs_ex_modify_cq),
			UAPI_DEF_METHOD_NEEDS_FN(modify_cq))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_DEVICE,
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_GET_CONTEXT,
				     ib_uverbs_get_context,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_get_context,
					     struct ib_uverbs_get_context_resp)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_QUERY_DEVICE,
			ib_uverbs_query_device,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_device,
					  struct ib_uverbs_query_device_resp)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_QUERY_PORT,
			ib_uverbs_query_port,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_port,
					  struct ib_uverbs_query_port_resp),
			UAPI_DEF_METHOD_NEEDS_FN(query_port)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_QUERY_DEVICE,
			ib_uverbs_ex_query_device,
			UAPI_DEF_WRITE_IO_EX(
				struct ib_uverbs_ex_query_device,
				reserved,
				struct ib_uverbs_ex_query_device_resp,
				response_length),
			UAPI_DEF_METHOD_NEEDS_FN(query_device)),
		UAPI_DEF_OBJ_NEEDS_FN(alloc_ucontext),
		UAPI_DEF_OBJ_NEEDS_FN(dealloc_ucontext)),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_FLOW,
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_CREATE_FLOW,
			ib_uverbs_ex_create_flow,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_create_flow,
					     flow_attr,
					     struct ib_uverbs_create_flow_resp,
					     flow_handle),
			UAPI_DEF_METHOD_NEEDS_FN(create_flow)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
			ib_uverbs_ex_destroy_flow,
			UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_flow),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_flow))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_MR,
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_DEREG_MR,
				     ib_uverbs_dereg_mr,
				     UAPI_DEF_WRITE_I(struct ib_uverbs_dereg_mr),
				     UAPI_DEF_METHOD_NEEDS_FN(dereg_mr)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_REG_MR,
			ib_uverbs_reg_mr,
			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_reg_mr,
						struct ib_uverbs_reg_mr_resp),
			UAPI_DEF_METHOD_NEEDS_FN(reg_user_mr)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_REREG_MR,
			ib_uverbs_rereg_mr,
			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_rereg_mr,
						struct ib_uverbs_rereg_mr_resp),
			UAPI_DEF_METHOD_NEEDS_FN(rereg_user_mr))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_MW,
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_ALLOC_MW,
			ib_uverbs_alloc_mw,
			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_mw,
						struct ib_uverbs_alloc_mw_resp),
			UAPI_DEF_METHOD_NEEDS_FN(alloc_mw)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DEALLOC_MW,
			ib_uverbs_dealloc_mw,
			UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_mw),
			UAPI_DEF_METHOD_NEEDS_FN(dealloc_mw))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_PD,
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_ALLOC_PD,
			ib_uverbs_alloc_pd,
			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_pd,
						struct ib_uverbs_alloc_pd_resp),
			UAPI_DEF_METHOD_NEEDS_FN(alloc_pd)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DEALLOC_PD,
			ib_uverbs_dealloc_pd,
			UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_pd),
			UAPI_DEF_METHOD_NEEDS_FN(dealloc_pd))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_QP,
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_ATTACH_MCAST,
			ib_uverbs_attach_mcast,
			UAPI_DEF_WRITE_I(struct ib_uverbs_attach_mcast),
			UAPI_DEF_METHOD_NEEDS_FN(attach_mcast),
			UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_QP,
				     ib_uverbs_create_qp,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_create_qp,
					     struct ib_uverbs_create_qp_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DESTROY_QP,
			ib_uverbs_destroy_qp,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_qp,
					  struct ib_uverbs_destroy_qp_resp),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_qp)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DETACH_MCAST,
			ib_uverbs_detach_mcast,
			UAPI_DEF_WRITE_I(struct ib_uverbs_detach_mcast),
			UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_MODIFY_QP,
			ib_uverbs_modify_qp,
			UAPI_DEF_WRITE_I(struct ib_uverbs_modify_qp),
			UAPI_DEF_METHOD_NEEDS_FN(modify_qp)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_POST_RECV,
			ib_uverbs_post_recv,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_post_recv,
					  struct ib_uverbs_post_recv_resp),
			UAPI_DEF_METHOD_NEEDS_FN(post_recv)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_POST_SEND,
			ib_uverbs_post_send,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_post_send,
					  struct ib_uverbs_post_send_resp),
			UAPI_DEF_METHOD_NEEDS_FN(post_send)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_QUERY_QP,
			ib_uverbs_query_qp,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_qp,
					  struct ib_uverbs_query_qp_resp),
			UAPI_DEF_METHOD_NEEDS_FN(query_qp)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_CREATE_QP,
			ib_uverbs_ex_create_qp,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_qp,
					     comp_mask,
					     struct ib_uverbs_ex_create_qp_resp,
					     response_length),
			UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_MODIFY_QP,
			ib_uverbs_ex_modify_qp,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_modify_qp,
					     base,
					     struct ib_uverbs_ex_modify_qp_resp,
					     response_length),
			UAPI_DEF_METHOD_NEEDS_FN(modify_qp))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_RWQ_IND_TBL,
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL,
			ib_uverbs_ex_create_rwq_ind_table,
			UAPI_DEF_WRITE_IO_EX(
				struct ib_uverbs_ex_create_rwq_ind_table,
				log_ind_tbl_size,
				struct ib_uverbs_ex_create_rwq_ind_table_resp,
				ind_tbl_num),
			UAPI_DEF_METHOD_NEEDS_FN(create_rwq_ind_table)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
			ib_uverbs_ex_destroy_rwq_ind_table,
			UAPI_DEF_WRITE_I(
				struct ib_uverbs_ex_destroy_rwq_ind_table),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_rwq_ind_table))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_WQ,
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_CREATE_WQ,
			ib_uverbs_ex_create_wq,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_wq,
					     max_sge,
					     struct ib_uverbs_ex_create_wq_resp,
					     wqn),
			UAPI_DEF_METHOD_NEEDS_FN(create_wq)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_DESTROY_WQ,
			ib_uverbs_ex_destroy_wq,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_destroy_wq,
					     wq_handle,
					     struct ib_uverbs_ex_destroy_wq_resp,
					     reserved),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_wq)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_MODIFY_WQ,
			ib_uverbs_ex_modify_wq,
			UAPI_DEF_WRITE_I_EX(struct ib_uverbs_ex_modify_wq,
					    curr_wq_state),
			UAPI_DEF_METHOD_NEEDS_FN(modify_wq))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_SRQ,
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_SRQ,
				     ib_uverbs_create_srq,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_create_srq,
					     struct ib_uverbs_create_srq_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_XSRQ,
				     ib_uverbs_create_xsrq,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_create_xsrq,
					     struct ib_uverbs_create_srq_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DESTROY_SRQ,
			ib_uverbs_destroy_srq,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_srq,
					  struct ib_uverbs_destroy_srq_resp),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_srq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_MODIFY_SRQ,
			ib_uverbs_modify_srq,
			UAPI_DEF_WRITE_UDATA_I(struct ib_uverbs_modify_srq),
			UAPI_DEF_METHOD_NEEDS_FN(modify_srq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_POST_SRQ_RECV,
			ib_uverbs_post_srq_recv,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_post_srq_recv,
					  struct ib_uverbs_post_srq_recv_resp),
			UAPI_DEF_METHOD_NEEDS_FN(post_srq_recv)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_QUERY_SRQ,
			ib_uverbs_query_srq,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_srq,
					  struct ib_uverbs_query_srq_resp),
			UAPI_DEF_METHOD_NEEDS_FN(query_srq))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_XRCD,
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_CLOSE_XRCD,
			ib_uverbs_close_xrcd,
			UAPI_DEF_WRITE_I(struct ib_uverbs_close_xrcd),
			UAPI_DEF_METHOD_NEEDS_FN(dealloc_xrcd)),
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_QP,
				     ib_uverbs_open_qp,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_open_qp,
					     struct ib_uverbs_create_qp_resp)),
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_XRCD,
				     ib_uverbs_open_xrcd,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_open_xrcd,
					     struct ib_uverbs_open_xrcd_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(alloc_xrcd))),

	{},
};