/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

/*
 * Copy a response to userspace. If the provided 'resp' is larger than the
 * user buffer it is silently truncated. If the user provided a larger buffer
 * then the trailing portion is zero filled.
 *
 * These semantics are intended to support future extension of the output
 * structures.
 */
static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp,
			   size_t resp_len)
{
	int ret;

	if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
		return uverbs_copy_to_struct_or_zero(
			attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len);

	if (copy_to_user(attrs->ucore.outbuf, resp,
			 min(attrs->ucore.outlen, resp_len)))
		return -EFAULT;

	if (resp_len < attrs->ucore.outlen) {
		/*
		 * Zero fill any extra memory that user
		 * space might have provided.
		 */
		ret = clear_user(attrs->ucore.outbuf + resp_len,
				 attrs->ucore.outlen - resp_len);
		if (ret)
			return -EFAULT;
	}

	return 0;
}
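
/*
 * Worked example of the semantics above (illustrative sizes only): for a
 * handler whose kernel response is 16 bytes, a user buffer with
 * outlen == 24 gets the full 16 bytes plus 8 zeroed trailing bytes, while
 * a user buffer with outlen == 8 receives only the first 8 bytes of the
 * response and the remainder is silently dropped.
 */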

/*
 * Copy a request from userspace. If the provided 'req' is larger than the
 * user buffer then the user buffer is zero extended into the 'req'. If 'req'
 * is smaller than the user buffer then the uncopied bytes in the user buffer
 * must be zero.
 */
static int uverbs_request(struct uverbs_attr_bundle *attrs, void *req,
			  size_t req_len)
{
	if (copy_from_user(req, attrs->ucore.inbuf,
			   min(attrs->ucore.inlen, req_len)))
		return -EFAULT;

	if (attrs->ucore.inlen < req_len) {
		memset(req + attrs->ucore.inlen, 0,
		       req_len - attrs->ucore.inlen);
	} else if (attrs->ucore.inlen > req_len) {
		if (!ib_is_buffer_cleared(attrs->ucore.inbuf + req_len,
					  attrs->ucore.inlen - req_len))
			return -EOPNOTSUPP;
	}
	return 0;
}
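
/*
 * Illustrative example of the request semantics (hypothetical sizes): with
 * a 32-byte kernel 'req' and inlen == 24, bytes 24..31 of 'req' are zero
 * filled; with inlen == 40, the copy succeeds only if the trailing 8 bytes
 * of the user buffer are zero, otherwise -EOPNOTSUPP is returned.
 */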

/*
 * Generate the value for the 'response_length' protocol used by write_ex.
 * This is the number of bytes the kernel actually wrote. Userspace can use
 * this to detect what structure members in the response the kernel
 * understood.
 */
static u32 uverbs_response_length(struct uverbs_attr_bundle *attrs,
				  size_t resp_len)
{
	return min_t(size_t, attrs->ucore.outlen, resp_len);
}
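
/*
 * For example (illustrative values): if the kernel understands a 32-byte
 * extended response but userspace supplied only a 24-byte buffer,
 * response_length is reported as 24, telling userspace that the final
 * 8 bytes of the structure were never written.
 */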

/*
 * The iterator version of the request interface is for handlers that need to
 * step over a flex array at the end of a command header.
 */
struct uverbs_req_iter {
	const void __user *cur;
	const void __user *end;
};

static int uverbs_request_start(struct uverbs_attr_bundle *attrs,
				struct uverbs_req_iter *iter,
				void *req,
				size_t req_len)
{
	if (attrs->ucore.inlen < req_len)
		return -ENOSPC;

	if (copy_from_user(req, attrs->ucore.inbuf, req_len))
		return -EFAULT;

	iter->cur = attrs->ucore.inbuf + req_len;
	iter->end = attrs->ucore.inbuf + attrs->ucore.inlen;
	return 0;
}

static int uverbs_request_next(struct uverbs_req_iter *iter, void *val,
			       size_t len)
{
	if (iter->cur + len > iter->end)
		return -ENOSPC;

	if (copy_from_user(val, iter->cur, len))
		return -EFAULT;

	iter->cur += len;
	return 0;
}

static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
						  size_t len)
{
	const void __user *res = iter->cur;

	if (iter->cur + len > iter->end)
		return (void __force __user *)ERR_PTR(-ENOSPC);
	iter->cur += len;
	return res;
}

static int uverbs_request_finish(struct uverbs_req_iter *iter)
{
	if (!ib_is_buffer_cleared(iter->cur, iter->end - iter->cur))
		return -EOPNOTSUPP;
	return 0;
}
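
/*
 * A minimal usage sketch of the iterator (hypothetical handler; the
 * 'sge_count' field and 'struct ib_uverbs_sge' trailer are assumptions
 * for illustration):
 *
 *	struct uverbs_req_iter iter;
 *	struct ib_uverbs_sge sge;
 *	u32 i;
 *	int ret;
 *
 *	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < cmd.sge_count; i++) {
 *		ret = uverbs_request_next(&iter, &sge, sizeof(sge));
 *		if (ret)
 *			return ret;
 *	}
 *	return uverbs_request_finish(&iter);
 */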

/*
 * When calling a destroy function during an error unwind we need to pass in
 * the udata that is sanitized of all user arguments. I.e., from the driver's
 * perspective it looks like no udata was passed.
 */
struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs)
{
	attrs->driver_udata = (struct ib_udata){};
	return &attrs->driver_udata;
}

static struct ib_uverbs_completion_event_file *
_ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = ufd_get_read(UVERBS_OBJECT_COMP_CHANNEL,
					       fd, attrs);

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	return container_of(uobj, struct ib_uverbs_completion_event_file,
			    uobj);
}
#define ib_uverbs_lookup_comp_file(_fd, _ufile)                                \
	_ib_uverbs_lookup_comp_file((_fd)*typecheck(s32, _fd), _ufile)

static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *file = attrs->ufile;
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_ucontext		 *ucontext;
	struct file			 *filp;
	struct ib_rdmacg_object		 cg_obj;
	struct ib_device *ib_dev;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	mutex_lock(&file->ucontext_lock);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto err;
	}

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
	if (!ucontext) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	attrs->context = ucontext;

	ucontext->res.type = RDMA_RESTRACK_CTX;
	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;

	ucontext->closing = false;
	ucontext->cleanup_retryable = false;

	xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_file;

	ret = ib_dev->ops.alloc_ucontext(ucontext, &attrs->driver_udata);
	if (ret)
		goto err_file;

	rdma_restrack_uadd(&ucontext->res);

	fd_install(resp.async_fd, filp);

	/*
	 * Make sure that ib_uverbs_get_ucontext() sees the pointer update
	 * only after all writes to set up the ucontext have completed
	 */
	smp_store_release(&file->ucontext, ucontext);

	mutex_unlock(&file->ucontext_lock);

	return 0;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	kfree(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->ucontext_lock);
	return ret;
}

static void copy_query_dev_fields(struct ib_ucontext *ucontext,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	struct ib_device *ib_dev = ucontext->device;

	resp->fw_ver		= attr->fw_ver;
	resp->node_guid		= ib_dev->node_guid;
	resp->sys_image_guid	= attr->sys_image_guid;
	resp->max_mr_size	= attr->max_mr_size;
	resp->page_size_cap	= attr->page_size_cap;
	resp->vendor_id		= attr->vendor_id;
	resp->vendor_part_id	= attr->vendor_part_id;
	resp->hw_ver		= attr->hw_ver;
	resp->max_qp		= attr->max_qp;
	resp->max_qp_wr		= attr->max_qp_wr;
	resp->device_cap_flags	= lower_32_bits(attr->device_cap_flags);
	resp->max_sge		= min(attr->max_send_sge, attr->max_recv_sge);
	resp->max_sge_rd	= attr->max_sge_rd;
	resp->max_cq		= attr->max_cq;
	resp->max_cqe		= attr->max_cqe;
	resp->max_mr		= attr->max_mr;
	resp->max_pd		= attr->max_pd;
	resp->max_qp_rd_atom	= attr->max_qp_rd_atom;
	resp->max_ee_rd_atom	= attr->max_ee_rd_atom;
	resp->max_res_rd_atom	= attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom	= attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom	= attr->max_ee_init_rd_atom;
	resp->atomic_cap		= attr->atomic_cap;
	resp->max_ee			= attr->max_ee;
	resp->max_rdd			= attr->max_rdd;
	resp->max_mw			= attr->max_mw;
	resp->max_raw_ipv6_qp		= attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp		= attr->max_raw_ethy_qp;
	resp->max_mcast_grp		= attr->max_mcast_grp;
	resp->max_mcast_qp_attach	= attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach	= attr->max_total_mcast_qp_attach;
	resp->max_ah			= attr->max_ah;
	resp->max_fmr			= attr->max_fmr;
	resp->max_map_per_fmr		= attr->max_map_per_fmr;
	resp->max_srq			= attr->max_srq;
	resp->max_srq_wr		= attr->max_srq_wr;
	resp->max_srq_sge		= attr->max_srq_sge;
	resp->max_pkeys			= attr->max_pkeys;
	resp->local_ca_ack_delay	= attr->local_ca_ack_delay;
	resp->phys_port_cnt		= ib_dev->phys_port_cnt;
}

static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_ucontext *ucontext;
	int ret;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(ucontext, &resp, &ucontext->device->attrs);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_query_port(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;
	struct ib_ucontext *ucontext;
	struct ib_device *ib_dev;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);
	ib_dev = ucontext->device;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);
	copy_port_attr_to_resp(&attr, &resp, ib_dev, cmd.port_num);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_PD, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
	if (!pd) {
		ret = -ENOMEM;
		goto err;
	}

	pd->device  = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->res.type = RDMA_RESTRACK_PD;

	ret = ib_dev->ops.alloc_pd(pd, &attrs->driver_udata);
	if (ret)
		goto err_alloc;

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;
	rdma_restrack_uadd(&pd->res);

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;

err_copy:
	ib_dealloc_pd_user(pd, uverbs_get_cleared_udata(attrs));
	pd = NULL;
err_alloc:
	kfree(pd);
err:
	uobj_alloc_abort(uobj, attrs);
	return ret;
}

static int ib_uverbs_dealloc_pd(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_dealloc_pd cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
}

struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			    struct inode *inode,
			    struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_device *ibudev = attrs->ufile->device;
	struct ib_uverbs_open_xrcd	cmd;
	struct ib_uverbs_open_xrcd_resp	resp;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd			f = {NULL, 0};
	struct inode                   *inode = NULL;
	int				ret = 0;
	int				new_xrcd = 0;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	mutex_lock(&ibudev->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(ibudev, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, attrs,
						   &ib_dev);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->ops.alloc_xrcd(ib_dev, &attrs->driver_udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode   = inode;
		xrcd->device  = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(ibudev, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	if (f.file)
		fdput(f);

	mutex_unlock(&ibudev->xrcd_tree_mutex);

	rdma_alloc_commit_uobject(&obj->uobject, attrs);
	return 0;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(ibudev, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd, uverbs_get_cleared_udata(attrs));

err:
	uobj_alloc_abort(&obj->uobject, attrs);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&ibudev->xrcd_tree_mutex);

	return ret;
}

static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_close_xrcd cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs);
}

int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why,
			   struct uverbs_attr_bundle *attrs)
{
	struct inode *inode;
	int ret;
	struct ib_uverbs_device *dev = attrs->ufile->device;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd, &attrs->driver_udata);

	if (ib_is_destroy_retryable(ret, why, uobject)) {
		atomic_inc(&xrcd->usecnt);
		return ret;
	}

	if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}

static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->ops.reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
					 cmd.access_flags,
					 &attrs->driver_udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->type    = IB_MR_TYPE_USER;
	mr->dm	    = NULL;
	mr->sig_attrs = NULL;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	mr->res.type = RDMA_RESTRACK_MR;
	rdma_restrack_uadd(&mr->res);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	uobj_put_obj_read(pd);

	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;

err_copy:
	ib_dereg_mr_user(mr, uverbs_get_cleared_udata(attrs));

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj, attrs);
	return ret;
}

static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_rereg_mr      cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_pd                *pd = NULL;
	struct ib_mr                *mr;
	struct ib_pd		    *old_pd;
	int                          ret;
	struct ib_uobject	    *uobj;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
			return -EINVAL;

	uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (mr->dm) {
		ret = -EINVAL;
		goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
				       attrs);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->ops.rereg_user_mr(mr, cmd.flags, cmd.start,
					    cmd.length, cmd.hca_va,
					    cmd.access_flags, pd,
					    &attrs->driver_udata);
	if (ret)
		goto put_uobj_pd;

	if (cmd.flags & IB_MR_REREG_PD) {
		atomic_inc(&pd->usecnt);
		mr->pd = pd;
		atomic_dec(&old_pd->usecnt);
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;

	ret = uverbs_response(attrs, &resp, sizeof(resp));

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}

static int ib_uverbs_dereg_mr(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_dereg_mr cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
}

static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	int                            ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_MW, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.mw_type != IB_MW_TYPE_1 && cmd.mw_type != IB_MW_TYPE_2) {
		ret = -EINVAL;
		goto err_put;
	}

	mw = pd->device->ops.alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	uobj_put_obj_read(pd);
	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj, attrs);
	return ret;
}

static int ib_uverbs_dealloc_mw(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_dealloc_mw cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_MW, cmd.mw_handle, attrs);
}

static int ib_uverbs_create_comp_channel(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_comp_channel	   cmd;
	struct ib_uverbs_create_comp_channel_resp  resp;
	struct ib_uobject			  *uobj;
	struct ib_uverbs_completion_event_file	  *ev_file;
	struct ib_device *ib_dev;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret) {
		uobj_alloc_abort(uobj, attrs);
		return ret;
	}

	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;
}

static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
				       struct ib_uverbs_ex_create_cq *cmd)
{
	struct ib_ucq_object           *obj;
	struct ib_uverbs_completion_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};
	struct ib_device *ib_dev;

	if (cmd->comp_vector >= attrs->ufile->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, attrs);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uevent.uobject.user_handle = cmd->user_handle;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->uevent.event_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;
	attr.flags = cmd->flags;

	cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
	if (!cq) {
		ret = -ENOMEM;
		goto err_file;
	}
	cq->device        = ib_dev;
	cq->uobject       = obj;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata);
	if (ret)
		goto err_free;

	obj->uevent.uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uevent.uobject.id;
	resp.base.cqe       = cq->cqe;
	resp.response_length = uverbs_response_length(attrs, sizeof(resp));

	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_uadd(&cq->res);

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_cb;

	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
	return obj;

err_cb:
	ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
	cq = NULL;
err_free:
	kfree(cq);
err_file:
	if (ev_file)
		ib_uverbs_release_ucq(attrs->ufile, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uevent.uobject, attrs);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_ex_create_cq	cmd_ex;
	struct ib_ucq_object           *obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(attrs, &cmd_ex);
	return PTR_ERR_OR_ZERO(obj);
}

static int ib_uverbs_ex_create_cq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_create_cq  cmd;
	struct ib_ucq_object           *obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	obj = create_cq(attrs, &cmd);
	return PTR_ERR_OR_ZERO(obj);
}

static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_resize_cq	cmd;
	struct ib_uverbs_resize_cq_resp	resp = {};
	struct ib_cq			*cq;
	int				ret = -EINVAL;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq)
		return -EINVAL;

	ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
out:
	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);

	return ret;
}

static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
			   struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id		= wc->wr_id;
	tmp.status		= wc->status;
	tmp.opcode		= wc->opcode;
	tmp.vendor_err		= wc->vendor_err;
	tmp.byte_len		= wc->byte_len;
	tmp.ex.imm_data		= wc->ex.imm_data;
	tmp.qp_num		= wc->qp->qp_num;
	tmp.src_qp		= wc->src_qp;
	tmp.wc_flags		= wc->wc_flags;
	tmp.pkey_index		= wc->pkey_index;
	if (rdma_cap_opa_ah(ib_dev, wc->port_num))
		tmp.slid	= OPA_TO_IB_UCAST_LID(wc->slid);
	else
		tmp.slid	= ib_lid_cpu16(wc->slid);
	tmp.sl			= wc->sl;
	tmp.dlid_path_bits	= wc->dlid_path_bits;
	tmp.port_num		= wc->port_num;
	tmp.reserved		= 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp  resp;
	u8 __user                     *header_ptr;
	u8 __user                     *data_ptr;
	struct ib_cq                  *cq;
	struct ib_wc                   wc;
	int                            ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = attrs->ucore.outbuf;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(cq->device, data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}
	ret = 0;

	if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
		ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);

out_put:
	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
	return ret;
}

static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
	return 0;
}

static int ib_uverbs_destroy_cq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject		*uobj;
	struct ib_ucq_object        	*obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_ucq_object, uevent.uobject);
	memset(&resp, 0, sizeof(resp));
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->uevent.events_reported;

	uobj_put_destroy(uobj);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int create_qp(struct uverbs_attr_bundle *attrs,
		     struct ib_uverbs_ex_create_qp *cmd)
{
	struct ib_uqp_object		*obj;
	struct ib_device		*device;
	struct ib_pd			*pd = NULL;
	struct ib_xrcd			*xrcd = NULL;
	struct ib_uobject		*xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq			*scq = NULL, *rcq = NULL;
	struct ib_srq			*srq = NULL;
	struct ib_qp			*qp;
	struct ib_qp_init_attr		attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int				ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;
	struct ib_device *ib_dev;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table,
					    UVERBS_OBJECT_RWQ_IND_TBL,
					    cmd->rwq_ind_tbl_handle, attrs);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
					  attrs);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
							cmd->srq_handle, attrs);
				if (!srq || srq->srq_type == IB_SRQT_XRC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(
						cq, UVERBS_OBJECT_CQ,
						cmd->recv_cq_handle, attrs);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
						cmd->send_cq_handle, attrs);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
				       attrs);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = attrs->ufile;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd	   = xrcd;
	attr.sq_sig_type   = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					      IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd->qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd->max_send_wr;
	attr.cap.max_recv_wr     = cmd->max_recv_wr;
	attr.cap.max_send_sge    = cmd->max_send_sge;
	attr.cap.max_recv_sge    = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	attr.create_flags = cmd->create_flags;
	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				IB_QP_CREATE_CROSS_CHANNEL |
				IB_QP_CREATE_MANAGED_SEND |
				IB_QP_CREATE_MANAGED_RECV |
				IB_QP_CREATE_SCATTER_FCS |
				IB_QP_CREATE_CVLAN_STRIPPING |
				IB_QP_CREATE_SOURCE_QPN |
				IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			goto err_put;
		}

		attr.source_qpn = cmd->source_qpn;
	}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
				   &obj->uevent.uobject);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		qp->pd		  = pd;
		qp->send_cq	  = attr.send_cq;
		qp->recv_cq	  = attr.recv_cq;
		qp->srq		  = attr.srq;
		qp->rwq_ind_tbl	  = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context	  = attr.qp_context;
		qp->qp_type	  = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		qp->port = 0;
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	} else {
		/* It is done in _ib_create_qp for other QP types */
		qp->uobject = &obj->uevent.uobject;
	}

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn             = qp->qp_num;
	resp.base.qp_handle       = obj->uevent.uobject.id;
	resp.base.max_recv_sge    = attr.cap.max_recv_sge;
	resp.base.max_send_sge    = attr.cap.max_send_sge;
	resp.base.max_recv_wr     = attr.cap.max_recv_wr;
	resp.base.max_send_wr     = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;
	resp.response_length = uverbs_response_length(attrs, sizeof(resp));

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);
	if (rcq && rcq != scq)
		rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
	return 0;
err_cb:
	ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);
	if (rcq && rcq != scq)
		rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject, attrs);
	return ret;
}

static int ib_uverbs_create_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_ex_create_qp	cmd_ex;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	return create_qp(attrs, &cmd_ex);
}

static int ib_uverbs_ex_create_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_create_qp cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	return create_qp(attrs, &cmd);
}

static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_uqp_object           *obj;
	struct ib_xrcd		       *xrcd;
	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, attrs);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = attrs->ufile;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_destroy;

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
	return 0;

err_destroy:
	ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject, attrs);
	return ret;
}

static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
				   struct rdma_ah_attr *rdma_attr)
{
	const struct ib_global_route   *grh;

	uverb_attr->dlid              = rdma_ah_get_dlid(rdma_attr);
	uverb_attr->sl                = rdma_ah_get_sl(rdma_attr);
	uverb_attr->src_path_bits     = rdma_ah_get_path_bits(rdma_attr);
	uverb_attr->static_rate       = rdma_ah_get_static_rate(rdma_attr);
	uverb_attr->is_global         = !!(rdma_ah_get_ah_flags(rdma_attr) &
					 IB_AH_GRH);
	if (uverb_attr->is_global) {
		grh = rdma_ah_read_grh(rdma_attr);
		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
		uverb_attr->flow_label        = grh->flow_label;
		uverb_attr->sgid_index        = grh->sgid_index;
		uverb_attr->hop_limit         = grh->hop_limit;
		uverb_attr->traffic_class     = grh->traffic_class;
	}
	uverb_attr->port_num          = rdma_ah_get_port_num(rdma_attr);
}

static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                   *qp;
	struct ib_qp_attr              *attr;
	struct ib_qp_init_attr         *init_attr;
	int                            ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	uobj_put_obj_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state               = attr->qp_state;
	resp.cur_qp_state           = attr->cur_qp_state;
	resp.path_mtu               = attr->path_mtu;
	resp.path_mig_state         = attr->path_mig_state;
	resp.qkey                   = attr->qkey;
	resp.rq_psn                 = attr->rq_psn;
	resp.sq_psn                 = attr->sq_psn;
	resp.dest_qp_num            = attr->dest_qp_num;
	resp.qp_access_flags        = attr->qp_access_flags;
	resp.pkey_index             = attr->pkey_index;
	resp.alt_pkey_index         = attr->alt_pkey_index;
	resp.sq_draining            = attr->sq_draining;
	resp.max_rd_atomic          = attr->max_rd_atomic;
	resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
	resp.min_rnr_timer          = attr->min_rnr_timer;
	resp.port_num               = attr->port_num;
	resp.timeout                = attr->timeout;
	resp.retry_cnt              = attr->retry_cnt;
	resp.rnr_retry              = attr->rnr_retry;
	resp.alt_port_num           = attr->alt_port_num;
	resp.alt_timeout            = attr->alt_timeout;

	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);

	resp.max_send_wr            = init_attr->cap.max_send_wr;
	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
	resp.max_send_sge           = init_attr->cap.max_send_sge;
	resp.max_recv_sge           = init_attr->cap.max_recv_sge;
	resp.max_inline_data        = init_attr->cap.max_inline_data;
	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	ret = uverbs_response(attrs, &resp, sizeof(resp));

out:
	kfree(attr);
	kfree(init_attr);

	return ret;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

static void copy_ah_attr_from_uverbs(struct ib_device *dev,
				     struct rdma_ah_attr *rdma_attr,
				     struct ib_uverbs_qp_dest *uverb_attr)
{
	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
	if (uverb_attr->is_global) {
		rdma_ah_set_grh(rdma_attr, NULL,
				uverb_attr->flow_label,
				uverb_attr->sgid_index,
				uverb_attr->hop_limit,
				uverb_attr->traffic_class);
		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
	} else {
		rdma_ah_set_ah_flags(rdma_attr, 0);
	}
	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
	rdma_ah_set_make_grd(rdma_attr, false);
}

1781
static int modify_qp(struct uverbs_attr_bundle *attrs,
1782
		     struct ib_uverbs_ex_modify_qp *cmd)
1783
{
1784 1785 1786
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	int ret;
1787

1788
	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
1789 1790 1791
	if (!attr)
		return -ENOMEM;

1792 1793
	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle,
			       attrs);
1794
	if (!qp) {
1795 1796 1797 1798
		ret = -EINVAL;
		goto out;
	}

1799 1800
	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
1801 1802 1803 1804
		ret = -EINVAL;
		goto release_qp;
	}

1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856
	if ((cmd->base.attr_mask & IB_QP_AV)) {
		if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
			ret = -EINVAL;
			goto release_qp;
		}

		if (cmd->base.attr_mask & IB_QP_STATE &&
		    cmd->base.qp_state == IB_QPS_RTR) {
		/* We are in INIT->RTR TRANSITION (if we are not,
		 * this transition will be rejected in subsequent checks).
		 * In the INIT->RTR transition, we cannot have IB_QP_PORT set,
		 * but the IB_QP_STATE flag is required.
		 *
		 * Since kernel 3.14 (commit dbf727de7440), the uverbs driver,
		 * when IB_QP_AV is set, has required inclusion of a valid
		 * port number in the primary AV. (AVs are created and handled
		 * differently for infiniband and ethernet (RoCE) ports).
		 *
		 * Check the port number included in the primary AV against
		 * the port number in the qp struct, which was set (and saved)
		 * in the RST->INIT transition.
		 */
			if (cmd->base.dest.port_num != qp->real_qp->port) {
				ret = -EINVAL;
				goto release_qp;
			}
		} else {
		/* We are in SQD->SQD. (If we are not, this transition will
		 * be rejected later in the verbs layer checks).
		 * Check for both IB_QP_PORT and IB_QP_AV, these can be set
		 * together in the SQD->SQD transition.
		 *
		 * If only IP_QP_AV was set, add in IB_QP_PORT as well (the
		 * verbs layer driver does not track primary port changes
		 * resulting from path migration. Thus, in SQD, if the primary
		 * AV is modified, the primary port should also be modified).
		 *
		 * Note that in this transition, the IB_QP_STATE flag
		 * is not allowed.
		 */
			if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
			     == (IB_QP_AV | IB_QP_PORT)) &&
			    cmd->base.port_num != cmd->base.dest.port_num) {
				ret = -EINVAL;
				goto release_qp;
			}
			if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
			    == IB_QP_AV) {
				cmd->base.attr_mask |= IB_QP_PORT;
				cmd->base.port_num = cmd->base.dest.port_num;
			}
		}
1857 1858
	}

1859
	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
1860
	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
1861 1862
	    !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
	    cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
1863 1864 1865 1866
		ret = -EINVAL;
		goto release_qp;
	}

1867 1868
	if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
	    cmd->base.cur_qp_state > IB_QPS_ERR) ||
1869 1870
	    (cmd->base.attr_mask & IB_QP_STATE &&
	    cmd->base.qp_state > IB_QPS_ERR)) {
1871 1872 1873 1874
		ret = -EINVAL;
		goto release_qp;
	}

1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917
	if (cmd->base.attr_mask & IB_QP_STATE)
		attr->qp_state = cmd->base.qp_state;
	if (cmd->base.attr_mask & IB_QP_CUR_STATE)
		attr->cur_qp_state = cmd->base.cur_qp_state;
	if (cmd->base.attr_mask & IB_QP_PATH_MTU)
		attr->path_mtu = cmd->base.path_mtu;
	if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
		attr->path_mig_state = cmd->base.path_mig_state;
	if (cmd->base.attr_mask & IB_QP_QKEY)
		attr->qkey = cmd->base.qkey;
	if (cmd->base.attr_mask & IB_QP_RQ_PSN)
		attr->rq_psn = cmd->base.rq_psn;
	if (cmd->base.attr_mask & IB_QP_SQ_PSN)
		attr->sq_psn = cmd->base.sq_psn;
	if (cmd->base.attr_mask & IB_QP_DEST_QPN)
		attr->dest_qp_num = cmd->base.dest_qp_num;
	if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
		attr->qp_access_flags = cmd->base.qp_access_flags;
	if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
		attr->pkey_index = cmd->base.pkey_index;
	if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		attr->max_rd_atomic = cmd->base.max_rd_atomic;
	if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
		attr->min_rnr_timer = cmd->base.min_rnr_timer;
	if (cmd->base.attr_mask & IB_QP_PORT)
		attr->port_num = cmd->base.port_num;
	if (cmd->base.attr_mask & IB_QP_TIMEOUT)
		attr->timeout = cmd->base.timeout;
	if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
		attr->retry_cnt = cmd->base.retry_cnt;
	if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
		attr->rnr_retry = cmd->base.rnr_retry;
	if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
		attr->alt_port_num = cmd->base.alt_port_num;
		attr->alt_timeout = cmd->base.alt_timeout;
		attr->alt_pkey_index = cmd->base.alt_pkey_index;
	}
	if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
		attr->rate_limit = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
					 &cmd->base.dest);

	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
					 &cmd->base.alt_dest);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      &attrs->driver_udata);

release_qp:
	uobj_put_obj_read(qp);
out:
	kfree(attr);

	return ret;
}

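/*
 * Both modify_qp entry points below bound the user-supplied attr_mask with
 * the expression ~((LAST_MASK << 1) - 1): shifting the highest accepted
 * mask bit left by one and subtracting one yields a window containing every
 * accepted bit.  A sketch of the arithmetic, using a hypothetical last bit
 * of 1 << 20 rather than the real value:
 *
 *	((1 << 20) << 1) - 1  ==  0x001fffff	(bits 0..20 permitted)
 *	~0x001fffff           ==  0xffe00000	(bits 21..31 rejected)
 *
 * A request with any bit in the rejected window fails with -EOPNOTSUPP
 * instead of having unknown attributes silently ignored.
 */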
static int ib_uverbs_modify_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_modify_qp cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd.base, sizeof(cmd.base));
	if (ret)
		return ret;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	return modify_qp(attrs, &cmd);
}

static int ib_uverbs_ex_modify_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_modify_qp cmd;
	struct ib_uverbs_ex_modify_qp_resp resp = {
		.response_length = uverbs_response_length(attrs, sizeof(resp))
	};
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	ret = modify_qp(attrs, &cmd);
	if (ret)
		return ret;

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject		*uobj;
	struct ib_uqp_object        	*obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_get_destroy(UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	memset(&resp, 0, sizeof(resp));
	resp.events_reported = obj->uevent.events_reported;

	uobj_put_destroy(uobj);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

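/*
 * alloc_wr() sizes one kernel work request as the WR structure, padded to
 * ib_sge alignment, followed by the caller's scatter/gather array.  The
 * guard at its top is an overflow check on that sum: it refuses any num_sge
 * for which the equivalent condition
 *
 *	num_sge * sizeof(struct ib_sge) >
 *		U32_MAX - ALIGN(wr_size, sizeof(struct ib_sge))
 *
 * would hold, so the kmalloc() below can never be asked for a wrapped,
 * too-small size.
 */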
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
		       sizeof (struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
			 num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}

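/*
 * POST_SEND requests arrive as three back-to-back regions in the write()
 * payload: the fixed ib_uverbs_post_send header, then wr_count work queue
 * entries of wqe_size bytes each, then sge_count scatter/gather entries.
 * uverbs_request_next_ptr() below only computes pointers into those
 * regions; nothing is copied from userspace until each WQE is pulled in
 * inside the loop.
 */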
static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next;
	const struct ib_send_wr	       *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int				is_ud;
	int ret, ret2;
	size_t                          next_size;
	const struct ib_sge __user *sgls;
	const void __user *wqes;
	struct uverbs_req_iter iter;

	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (ret)
		return ret;
	wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
	if (IS_ERR(wqes))
		return PTR_ERR(wqes);
	sgls = uverbs_request_next_ptr(
		&iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
	if (IS_ERR(sgls))
		return PTR_ERR(sgls);
	ret = uverbs_request_finish(&iter);
	if (ret)
		return ret;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr, wqes + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
						   user_wr->wr.ud.ah, attrs);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list, sgls + sg_ind,
					   next->num_sge *
						   sizeof(struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->ops.post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	ret2 = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret2)
		ret = ret2;

out_put:
	uobj_put_obj_read(qp);

	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			uobj_put_obj_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret;
}

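/*
 * Shared unmarshalling helper for the two receive paths (QP receive queues
 * and SRQs).  It consumes the remainder of the request iterator - wr_count
 * WQEs followed by sge_count SGEs - and returns a kernel-side ib_recv_wr
 * chain, or an ERR_PTR.  On failure every partially built WR is freed here,
 * so callers only ever see a fully valid list.
 */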
static struct ib_recv_wr *
ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
			  u32 wqe_size, u32 sge_count)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;
	const struct ib_sge __user *sgls;
	const void __user *wqes;

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
	if (IS_ERR(wqes))
		return ERR_CAST(wqes);
	sgls = uverbs_request_next_ptr(
		iter, sge_count * sizeof(struct ib_uverbs_sge));
	if (IS_ERR(sgls))
		return ERR_CAST(sgls);
	ret = uverbs_request_finish(iter);
	if (ret)
		return ERR_PTR(ret);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, wqes + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		if (user_wr->num_sge >=
		    (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
		    sizeof (struct ib_sge)) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list, sgls + sg_ind,
					   next->num_sge *
						   sizeof(struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next;
	const struct ib_recv_wr	       *bad_wr;
	struct ib_qp                   *qp;
	int ret, ret2;
	struct uverbs_req_iter iter;

	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
				       cmd.sge_count);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	resp.bad_wr = 0;
	ret = qp->device->ops.post_recv(qp->real_qp, wr, &bad_wr);

	uobj_put_obj_read(qp);
	if (ret) {
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}
	}

	ret2 = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret2)
		ret = ret2;
out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret;
}

static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next;
	const struct ib_recv_wr		   *bad_wr;
	struct ib_srq                      *srq;
	int ret, ret2;
	struct uverbs_req_iter iter;

	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
				       cmd.sge_count);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
	if (!srq) {
		ret = -EINVAL;
		goto out;
	}

	resp.bad_wr = 0;
	ret = srq->device->ops.post_srq_recv(srq, wr, &bad_wr);

	uobj_put_obj_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	ret2 = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret2)
		ret = ret2;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret;
}

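/*
 * Address handle creation: the destination described by the command is
 * translated into an rdma_ah_attr.  The GRH fields (flow label, SGID index,
 * hop limit, traffic class, DGID) are only filled in when userspace marked
 * the address as global; for RoCE ports a GRH is effectively mandatory, but
 * enforcing that is left to rdma_create_user_ah() and the driver.
 */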
static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct rdma_ah_attr		attr = {};
	int ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_AH, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) {
		ret = -EINVAL;
		goto err;
	}

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
	rdma_ah_set_make_grd(&attr, false);
	rdma_ah_set_dlid(&attr, cmd.attr.dlid);
	rdma_ah_set_sl(&attr, cmd.attr.sl);
	rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
	rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
	rdma_ah_set_port_num(&attr, cmd.attr.port_num);

	if (cmd.attr.is_global) {
		rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
				cmd.attr.grh.sgid_index,
				cmd.attr.grh.hop_limit,
				cmd.attr.grh.traffic_class);
		rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr, 0);
	}

	ah = rdma_create_user_ah(pd, &attr, &attrs->driver_udata);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->user_handle = cmd.user_handle;
	uobj->object = ah;

	resp.ah_handle = uobj->id;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	uobj_put_obj_read(pd);
	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;

err_copy:
	rdma_destroy_ah_user(ah, RDMA_DESTROY_AH_SLEEPABLE,
			     uverbs_get_cleared_udata(attrs));

err_put:
	uobj_put_obj_read(pd);

err:
	uobj_alloc_abort(uobj, attrs);
	return ret;
}

static int ib_uverbs_destroy_ah(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_destroy_ah cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_AH, cmd.ah_handle, attrs);
}

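/*
 * Multicast attach/detach bookkeeping: each uverbs QP object keeps its own
 * mcast_list under mcast_lock, so a second attach of the same (gid, mlid)
 * pair is a harmless no-op and detach can verify the pair was actually
 * attached before calling into the core.  The same list is also used at QP
 * destroy time to drop attachments userspace never detached.
 */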
static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	mutex_lock(&obj->mcast_lock);
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);

	return ret;
}

static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;
	bool                          found = false;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
	mutex_lock(&obj->mcast_lock);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			found = true;
			break;
		}

	if (!found) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);
	return ret;
}

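/*
 * Flow steering rules can reference other uobjects (flow actions and
 * counters).  The ib_uflow_resources helpers below implement the lifetime
 * scheme: flow_resources_alloc() reserves room for up to num_specs
 * references, flow_resources_add() takes a usecnt reference as each spec is
 * parsed, and ib_uverbs_flow_resources_free() drops them all when the flow
 * is destroyed or its creation fails.
 */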
struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
{
	struct ib_uflow_resources *resources;

	resources = kzalloc(sizeof(*resources), GFP_KERNEL);

	if (!resources)
		return NULL;

	if (!num_specs)
		goto out;

	resources->counters =
		kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
	resources->collection =
		kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);

	if (!resources->counters || !resources->collection)
		goto err;

out:
	resources->max = num_specs;
	return resources;

err:
	kfree(resources->counters);
	kfree(resources);

	return NULL;
}
EXPORT_SYMBOL(flow_resources_alloc);

void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
	unsigned int i;

	if (!uflow_res)
		return;

	for (i = 0; i < uflow_res->collection_num; i++)
		atomic_dec(&uflow_res->collection[i]->usecnt);

	for (i = 0; i < uflow_res->counters_num; i++)
		atomic_dec(&uflow_res->counters[i]->usecnt);

	kfree(uflow_res->collection);
	kfree(uflow_res->counters);
	kfree(uflow_res);
}
EXPORT_SYMBOL(ib_uverbs_flow_resources_free);

void flow_resources_add(struct ib_uflow_resources *uflow_res,
			enum ib_flow_spec_type type,
			void *ibobj)
{
	WARN_ON(uflow_res->num >= uflow_res->max);

	switch (type) {
	case IB_FLOW_SPEC_ACTION_HANDLE:
		atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt);
		uflow_res->collection[uflow_res->collection_num++] =
			(struct ib_flow_action *)ibobj;
		break;
	case IB_FLOW_SPEC_ACTION_COUNT:
		atomic_inc(&((struct ib_counters *)ibobj)->usecnt);
		uflow_res->counters[uflow_res->counters_num++] =
			(struct ib_counters *)ibobj;
		break;
	default:
		WARN_ON(1);
	}

	uflow_res->num++;
}
EXPORT_SYMBOL(flow_resources_add);

static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
				       struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec,
				       struct ib_uflow_resources *uflow_res)
{
	ib_spec->type = kern_spec->type;
	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ACTION_TAG:
		if (kern_spec->flow_tag.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_tag))
			return -EINVAL;

		ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
		ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (kern_spec->drop.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_drop))
			return -EINVAL;

		ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
		break;
	case IB_FLOW_SPEC_ACTION_HANDLE:
		if (kern_spec->action.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_handle))
			return -EOPNOTSUPP;
		ib_spec->action.act = uobj_get_obj_read(flow_action,
							UVERBS_OBJECT_FLOW_ACTION,
							kern_spec->action.handle,
							attrs);
		if (!ib_spec->action.act)
			return -EINVAL;
		ib_spec->action.size =
			sizeof(struct ib_flow_spec_action_handle);
		flow_resources_add(uflow_res,
				   IB_FLOW_SPEC_ACTION_HANDLE,
				   ib_spec->action.act);
		uobj_put_obj_read(ib_spec->action.act);
		break;
	case IB_FLOW_SPEC_ACTION_COUNT:
		if (kern_spec->flow_count.size !=
			sizeof(struct ib_uverbs_flow_spec_action_count))
			return -EINVAL;
		ib_spec->flow_count.counters =
			uobj_get_obj_read(counters,
					  UVERBS_OBJECT_COUNTERS,
					  kern_spec->flow_count.handle,
					  attrs);
		if (!ib_spec->flow_count.counters)
			return -EINVAL;
		ib_spec->flow_count.size =
				sizeof(struct ib_flow_spec_action_count);
		flow_resources_add(uflow_res,
				   IB_FLOW_SPEC_ACTION_COUNT,
				   ib_spec->flow_count.counters);
		uobj_put_obj_read(ib_spec->flow_count.counters);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
{
	/* Returns user space filter size, includes padding */
	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}

static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * User space filter structures must be 64 bit aligned, otherwise this
	 * may pass, but we won't handle additional new attributes.
	 */

	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv(kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}

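/*
 * Filter conversion: each user spec carries a value block and a mask block
 * of kern_filter_sz bytes each.  spec_filter_size() above allows the user
 * blocks to be larger than what this kernel understands only when every
 * excess byte of the mask is zero - i.e. userspace may be built against a
 * newer ABI as long as it does not actually use the newer fields.  For
 * example (made-up sizes): with a 40-byte user filter against a 32-byte
 * kernel filter, mask bytes 32..39 must be zero or the spec is rejected
 * with -EINVAL.
 */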
int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
					  const void *kern_spec_mask,
					  const void *kern_spec_val,
					  size_t kern_filter_sz,
					  union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t ib_filter_sz;

	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	ib_spec->type = type;

	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
		return -EINVAL;

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_ESP:
		ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->esp.size = sizeof(struct ib_flow_spec_esp);
		memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_GRE:
		ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->gre.size = sizeof(struct ib_flow_spec_gre);
		memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_MPLS:
		ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls);
		memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ssize_t kern_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);

	kern_spec_val = (void *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = kern_spec_val + kern_filter_sz;

	return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type,
						     kern_spec_mask,
						     kern_spec_val,
						     kern_filter_sz, ib_spec);
}

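/*
 * kern_spec_to_ib_spec() below dispatches on the spec type: action specs
 * (type >= IB_FLOW_SPEC_ACTION_TAG) carry object handles and take the
 * action path, everything else is a filter.  Filter specs are laid out as
 * an ib_uverbs_flow_spec_hdr immediately followed by the value bytes and
 * then the mask bytes, each (size - sizeof(hdr)) / 2 bytes long, which is
 * exactly how kern_spec_to_ib_spec_filter() above slices them.
 */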
static int kern_spec_to_ib_spec(struct uverbs_attr_bundle *attrs,
				struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec,
				struct ib_uflow_resources *uflow_res)
{
	if (kern_spec->reserved)
		return -EINVAL;

	if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
		return kern_spec_to_ib_spec_action(attrs, kern_spec, ib_spec,
						   uflow_res);
	else
		return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}

static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_create_wq cmd;
	struct ib_uverbs_ex_create_wq_resp resp = {};
	struct ib_uwq_object           *obj;
	int err = 0;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_wq *wq;
	struct ib_wq_init_attr wq_init_attr = {};
	struct ib_device *ib_dev;

	err = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		err = -EINVAL;
		goto err_uobj;
	}

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq) {
		err = -EINVAL;
		goto err_put_pd;
	}

	wq_init_attr.cq = cq;
	wq_init_attr.max_sge = cmd.max_sge;
	wq_init_attr.max_wr = cmd.max_wr;
	wq_init_attr.wq_context = attrs->ufile;
	wq_init_attr.wq_type = cmd.wq_type;
	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
	wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
		goto err_put_cq;
	}

	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;
	wq->wq_type = wq_init_attr.wq_type;
	wq->cq = cq;
	wq->pd = pd;
	wq->device = pd->device;
	wq->wq_context = wq_init_attr.wq_context;
	atomic_set(&wq->usecnt, 0);
	atomic_inc(&pd->usecnt);
	atomic_inc(&cq->usecnt);

	memset(&resp, 0, sizeof(resp));
	resp.wq_handle = obj->uevent.uobject.id;
	resp.max_sge = wq_init_attr.max_sge;
	resp.max_wr = wq_init_attr.max_wr;
	resp.wqn = wq->wq_num;
	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
	err = uverbs_response(attrs, &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(pd);
	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
	return 0;

err_copy:
	ib_destroy_wq(wq, uverbs_get_cleared_udata(attrs));
err_put_cq:
	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
err_put_pd:
	uobj_put_obj_read(pd);
err_uobj:
	uobj_alloc_abort(&obj->uevent.uobject, attrs);

	return err;
}

static int ib_uverbs_ex_destroy_wq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_destroy_wq	cmd;
	struct ib_uverbs_ex_destroy_wq_resp	resp = {};
	struct ib_uobject		*uobj;
	struct ib_uwq_object		*obj;
	int				ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
	uobj = uobj_get_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	resp.events_reported = obj->uevent.events_reported;

	uobj_put_destroy(uobj);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_modify_wq cmd;
	struct ib_wq *wq;
	struct ib_wq_attr wq_attr = {};
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (!cmd.attr_mask)
		return -EINVAL;

	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
		return -EINVAL;

	wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
	if (!wq)
		return -EINVAL;

	wq_attr.curr_wq_state = cmd.curr_wq_state;
	wq_attr.wq_state = cmd.wq_state;
	if (cmd.attr_mask & IB_WQ_FLAGS) {
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
	ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
					&attrs->driver_udata);
	uobj_put_obj_read(wq);
	return ret;
}

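/*
 * An RSS indirection table is specified by its log2 size: the handle array
 * that follows the command holds exactly 1 << log_ind_tbl_size WQ handles
 * (e.g. log_ind_tbl_size == 3 means 8 handles).  Every handle is resolved
 * and held for the duration of the call, and each WQ's usecnt is bumped
 * once the table has actually been created.
 */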
static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_create_rwq_ind_table cmd;
	struct ib_uverbs_ex_create_rwq_ind_table_resp  resp = {};
	struct ib_uobject		  *uobj;
	int err;
	struct ib_rwq_ind_table_init_attr init_attr = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_wq	**wqs = NULL;
	u32 *wqs_handles = NULL;
	struct ib_wq	*wq = NULL;
	int i, j, num_read_wqs;
	u32 num_wq_handles;
	struct uverbs_req_iter iter;
	struct ib_device *ib_dev;

	err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;

	err = uverbs_request_next(&iter, wqs_handles,
				  num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	err = uverbs_request_finish(&iter);
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto  err_free;
	}

	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
			num_read_wqs++) {
		wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
				       wqs_handles[num_read_wqs], attrs);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, attrs, &ib_dev);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto put_wqs;
	}

	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;

	rwq_ind_tbl = ib_dev->ops.create_rwq_ind_table(ib_dev, &init_attr,
						       &attrs->driver_udata);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = uverbs_response_length(attrs, sizeof(resp));

	err = uverbs_response(attrs, &resp, sizeof(resp));
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);

	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;

err_copy:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	uobj_alloc_abort(uobj, attrs);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}

static int ib_uverbs_ex_destroy_rwq_ind_table(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_destroy_rwq_ind_table cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	return uobj_perform_destroy(UVERBS_OBJECT_RWQ_IND_TBL,
				    cmd.ind_tbl_handle, attrs);
}

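/*
 * Flow creation walks a variable-length chain of specs that trails the
 * command.  The conversion loop below maintains two invariants:
 * cmd.flow_attr.size counts the user bytes not yet consumed and must reach
 * exactly zero, and i counts specs converted and must reach num_of_specs.
 * Violating either means the user buffer and the spec headers disagreed,
 * and the whole request is rejected rather than partially applied.
 */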
static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		  *uobj;
	struct ib_flow			  *flow_id;
	struct ib_uverbs_flow_attr	  *kern_flow_attr;
	struct ib_flow_attr		  *flow_attr;
	struct ib_qp			  *qp;
	struct ib_uflow_resources	  *uflow_res;
	struct ib_uverbs_flow_spec_hdr	  *kern_spec;
	struct uverbs_req_iter iter;
	int err;
	void *ib_spec;
	int i;
	struct ib_device *ib_dev;

	err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		*kern_flow_attr = cmd.flow_attr;
		err = uverbs_request_next(&iter, &kern_flow_attr->flow_specs,
					  cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	err = uverbs_request_finish(&iter);
	if (err)
		goto err_free_attr;

	uobj = uobj_alloc(UVERBS_OBJECT_FLOW, attrs, &ib_dev);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto err_free_attr;
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
		err = -EINVAL;
		goto err_put;
	}

	flow_attr = kzalloc(struct_size(flow_attr, flows,
				cmd.flow_attr.num_of_specs), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}
	uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs);
	if (!uflow_res) {
		err = -ENOMEM;
		goto err_free_flow_attr;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr->flow_specs;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
			cmd.flow_attr.size >= sizeof(*kern_spec) &&
			cmd.flow_attr.size >= kern_spec->size;
	     i++) {
		err = kern_spec_to_ib_spec(
				attrs, (struct ib_uverbs_flow_spec *)kern_spec,
				ib_spec, uflow_res);
		if (err)
			goto err_free;

		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= kern_spec->size;
		kern_spec = ((void *)kern_spec) + kern_spec->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}

	flow_id = qp->device->ops.create_flow(
		qp, flow_attr, IB_FLOW_DOMAIN_USER, &attrs->driver_udata);

	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}

	ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = uverbs_response(attrs, &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(qp);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;
err_copy:
	if (!qp->device->ops.destroy_flow(flow_id))
		atomic_dec(&qp->usecnt);
err_free:
	ib_uverbs_flow_resources_free(uflow_res);
err_free_flow_attr:
	kfree(flow_attr);
err_put:
	uobj_put_obj_read(qp);
err_uobj:
	uobj_alloc_abort(uobj, attrs);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}

static int ib_uverbs_ex_destroy_flow(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_destroy_flow	cmd;
	int				ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	return uobj_perform_destroy(UVERBS_OBJECT_FLOW, cmd.flow_handle, attrs);
}

static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;
	struct ib_device *ib_dev;

	obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, attrs,
						  &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (cmd->srq_type == IB_SRQT_TM)
		attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;

	if (cmd->srq_type == IB_SRQT_XRC) {
		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
					  attrs);
		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err;
		}

		attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);
	}

	if (ib_srq_has_cq(cmd->srq_type)) {
		attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
						cmd->cq_handle, attrs);
		if (!attr.ext.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = attrs->ufile;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = rdma_zalloc_drv_obj(ib_dev, ib_srq);
	if (!srq) {
		ret = -ENOMEM;
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	ret = pd->device->ops.create_srq(srq, &attr, udata);
	if (ret)
		goto err_free;

	if (ib_srq_has_cq(cmd->srq_type)) {
		srq->ext.cq       = attr.ext.cq;
		atomic_inc(&attr.ext.cq->usecnt);
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	obj->uevent.uobject.user_handle = cmd->user_handle;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	if (cmd->srq_type == IB_SRQT_XRC)
		uobj_put_read(xrcd_uobj);

	if (ib_srq_has_cq(cmd->srq_type))
		rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);

	uobj_put_obj_read(pd);
	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
	return 0;

err_copy:
	ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
	/* It was released in ib_destroy_srq_user */
	srq = NULL;
err_free:
	kfree(srq);
err_put:
	uobj_put_obj_read(pd);

err_put_cq:
	if (ib_srq_has_cq(cmd->srq_type))
		rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
					UVERBS_LOOKUP_READ);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

err:
	uobj_alloc_abort(&obj->uevent.uobject, attrs);
	return ret;
}

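/*
 * The legacy create_srq command is a strict subset of create_xsrq, so it is
 * implemented by widening the request into an ib_uverbs_create_xsrq with
 * srq_type fixed at IB_SRQT_BASIC and reusing __uverbs_create_xsrq() above.
 */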
static int ib_uverbs_create_srq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	memset(&xcmd, 0, sizeof(xcmd));
	xcmd.response	 = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type	 = IB_SRQT_BASIC;
	xcmd.pd_handle	 = cmd.pd_handle;
	xcmd.max_wr	 = cmd.max_wr;
	xcmd.max_sge	 = cmd.max_sge;
	xcmd.srq_limit	 = cmd.srq_limit;

	return __uverbs_create_xsrq(attrs, &xcmd, &attrs->driver_udata);
}

static int ib_uverbs_create_xsrq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_xsrq     cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return __uverbs_create_xsrq(attrs, &cmd, &attrs->driver_udata);
}

static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->ops.modify_srq(srq, &attr, cmd.attr_mask,
					  &attrs->driver_udata);

	uobj_put_obj_read(srq);

	return ret;
}

static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                             ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	uobj_put_obj_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_destroy_srq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_uevent_object        	 *obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_get_destroy(UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uevent_object, uobject);
	memset(&resp, 0, sizeof(resp));
	resp.events_reported = obj->events_reported;

	uobj_put_destroy(uobj);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

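/*
 * Extended query_device: resp.response_length is clamped to what userspace
 * actually provided, so an old consumer with a short response buffer simply
 * receives the leading fields it knows about while newer trailing fields
 * are truncated away by uverbs_response().
 */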
static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_query_device_resp resp = {};
	struct ib_uverbs_ex_query_device  cmd;
	struct ib_device_attr attr = {0};
	struct ib_ucontext *ucontext;
	struct ib_device *ib_dev;
	int err;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);
	ib_dev = ucontext->device;

	err = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	err = ib_dev->ops.query_device(ib_dev, &attr, &attrs->driver_udata);
	if (err)
		return err;

	copy_query_dev_fields(ucontext, &resp.base, &attr);

	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
	resp.xrc_odp_caps = attr.odp_caps.per_transport_caps.xrc_odp_caps;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.hca_core_clock = attr.hca_core_clock;
	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;
	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.tm_caps.max_rndv_hdr_size	= attr.tm_caps.max_rndv_hdr_size;
	resp.tm_caps.max_num_tags	= attr.tm_caps.max_num_tags;
	resp.tm_caps.max_ops		= attr.tm_caps.max_ops;
	resp.tm_caps.max_sge		= attr.tm_caps.max_sge;
	resp.tm_caps.flags		= attr.tm_caps.flags;
	resp.cq_moderation_caps.max_cq_moderation_count  =
		attr.cq_caps.max_cq_moderation_count;
	resp.cq_moderation_caps.max_cq_moderation_period =
		attr.cq_caps.max_cq_moderation_period;
	resp.max_dm_size = attr.max_dm_size;
	resp.response_length = uverbs_response_length(attrs, sizeof(resp));

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_modify_cq cmd;
	struct ib_cq *cq;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (!cmd.attr_mask || cmd.reserved)
		return -EINVAL;

	if (cmd.attr_mask > IB_CQ_MODERATE)
		return -EOPNOTSUPP;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq)
		return -EINVAL;

	ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);

	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
	return ret;
}

/*
 * Describe the input structs for write(). Some write methods have an
 * input-only struct; most have both an input and an output. If the struct
 * has an output then the 'response' u64 must be the first field in the
 * request structure.
 *
 * If udata is present then both the request and response structs have a
 * trailing driver_data flex array. In this case the size of the base struct
 * cannot be changed.
 */
#define UAPI_DEF_WRITE_IO(req, resp)                                           \
	.write.has_resp = 1 +                                                  \
			  BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) +    \
			  BUILD_BUG_ON_ZERO(sizeof(((req *)0)->response) !=    \
					    sizeof(u64)),                      \
	.write.req_size = sizeof(req), .write.resp_size = sizeof(resp)

#define UAPI_DEF_WRITE_I(req) .write.req_size = sizeof(req)

#define UAPI_DEF_WRITE_UDATA_IO(req, resp)                                     \
	UAPI_DEF_WRITE_IO(req, resp),                                          \
		.write.has_udata =                                             \
			1 +                                                    \
			BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=        \
					  sizeof(req)) +                       \
			BUILD_BUG_ON_ZERO(offsetof(resp, driver_data) !=       \
					  sizeof(resp))

#define UAPI_DEF_WRITE_UDATA_I(req)                                            \
	UAPI_DEF_WRITE_I(req),                                                 \
		.write.has_udata =                                             \
			1 + BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=    \
					      sizeof(req))
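
/*
 * For illustration only: a hypothetical command "foo" (not a real uAPI
 * command) carrying udata would have to lay out its structs as below to
 * pass the BUILD_BUG_ON checks above:
 *
 *	struct ib_uverbs_foo {
 *		__u64 response;		// required first field (has_resp)
 *		__u32 bar;
 *		__u32 reserved;
 *		__u64 driver_data[];	// must start exactly at sizeof(req)
 *	};
 *
 *	struct ib_uverbs_foo_resp {
 *		__u32 baz;
 *		__u32 reserved;
 *		__u64 driver_data[];	// must start exactly at sizeof(resp)
 *	};
 *
 * and would then be declared with:
 *
 *	UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_foo,
 *				struct ib_uverbs_foo_resp)
 */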

/*
 * The _EX versions are for use with WRITE_EX and allow the last struct member
 * to be specified. Buffers that do not include that member will be rejected.
 */
#define UAPI_DEF_WRITE_IO_EX(req, req_last_member, resp, resp_last_member)     \
	.write.has_resp = 1,                                                   \
	.write.req_size = offsetofend(req, req_last_member),                   \
	.write.resp_size = offsetofend(resp, resp_last_member)

#define UAPI_DEF_WRITE_I_EX(req, req_last_member)                              \
	.write.req_size = offsetofend(req, req_last_member)
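
/*
 * Sketch of the effect, using modify_wq (declared below) as the example:
 * UAPI_DEF_WRITE_I_EX(struct ib_uverbs_ex_modify_wq, curr_wq_state) sets
 * req_size to offsetofend(struct ib_uverbs_ex_modify_wq, curr_wq_state),
 * so a write() that stops short of curr_wq_state is rejected, while a
 * longer, zero-extended buffer is still accepted.
 */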

const struct uapi_definition uverbs_def_write_intf[] = {
	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_AH,
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_AH,
				     ib_uverbs_create_ah,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_create_ah,
					     struct ib_uverbs_create_ah_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(create_ah)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DESTROY_AH,
			ib_uverbs_destroy_ah,
			UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_ah),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_ah))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_COMP_CHANNEL,
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL,
			ib_uverbs_create_comp_channel,
			UAPI_DEF_WRITE_IO(
				struct ib_uverbs_create_comp_channel,
				struct ib_uverbs_create_comp_channel_resp))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_CQ,
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_CQ,
				     ib_uverbs_create_cq,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_create_cq,
					     struct ib_uverbs_create_cq_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DESTROY_CQ,
			ib_uverbs_destroy_cq,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_cq,
					  struct ib_uverbs_destroy_cq_resp),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_cq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_POLL_CQ,
			ib_uverbs_poll_cq,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_poll_cq,
					  struct ib_uverbs_poll_cq_resp),
			UAPI_DEF_METHOD_NEEDS_FN(poll_cq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_REQ_NOTIFY_CQ,
			ib_uverbs_req_notify_cq,
			UAPI_DEF_WRITE_I(struct ib_uverbs_req_notify_cq),
			UAPI_DEF_METHOD_NEEDS_FN(req_notify_cq)),
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_RESIZE_CQ,
				     ib_uverbs_resize_cq,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_resize_cq,
					     struct ib_uverbs_resize_cq_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(resize_cq)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_CREATE_CQ,
			ib_uverbs_ex_create_cq,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_cq,
					     reserved,
					     struct ib_uverbs_ex_create_cq_resp,
					     response_length),
			UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_MODIFY_CQ,
			ib_uverbs_ex_modify_cq,
			UAPI_DEF_WRITE_I(struct ib_uverbs_ex_modify_cq),
			UAPI_DEF_METHOD_NEEDS_FN(modify_cq))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_DEVICE,
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_GET_CONTEXT,
				     ib_uverbs_get_context,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_get_context,
					     struct ib_uverbs_get_context_resp)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_QUERY_DEVICE,
			ib_uverbs_query_device,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_device,
					  struct ib_uverbs_query_device_resp)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_QUERY_PORT,
			ib_uverbs_query_port,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_port,
					  struct ib_uverbs_query_port_resp),
			UAPI_DEF_METHOD_NEEDS_FN(query_port)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_QUERY_DEVICE,
			ib_uverbs_ex_query_device,
			UAPI_DEF_WRITE_IO_EX(
				struct ib_uverbs_ex_query_device,
				reserved,
				struct ib_uverbs_ex_query_device_resp,
				response_length),
			UAPI_DEF_METHOD_NEEDS_FN(query_device)),
		UAPI_DEF_OBJ_NEEDS_FN(alloc_ucontext),
		UAPI_DEF_OBJ_NEEDS_FN(dealloc_ucontext)),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_FLOW,
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_CREATE_FLOW,
			ib_uverbs_ex_create_flow,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_create_flow,
					     flow_attr,
					     struct ib_uverbs_create_flow_resp,
					     flow_handle),
			UAPI_DEF_METHOD_NEEDS_FN(create_flow)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
			ib_uverbs_ex_destroy_flow,
			UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_flow),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_flow))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_MR,
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_DEREG_MR,
				     ib_uverbs_dereg_mr,
				     UAPI_DEF_WRITE_I(struct ib_uverbs_dereg_mr),
				     UAPI_DEF_METHOD_NEEDS_FN(dereg_mr)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_REG_MR,
			ib_uverbs_reg_mr,
			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_reg_mr,
						struct ib_uverbs_reg_mr_resp),
			UAPI_DEF_METHOD_NEEDS_FN(reg_user_mr)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_REREG_MR,
			ib_uverbs_rereg_mr,
			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_rereg_mr,
						struct ib_uverbs_rereg_mr_resp),
			UAPI_DEF_METHOD_NEEDS_FN(rereg_user_mr))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_MW,
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_ALLOC_MW,
			ib_uverbs_alloc_mw,
			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_mw,
						struct ib_uverbs_alloc_mw_resp),
			UAPI_DEF_METHOD_NEEDS_FN(alloc_mw)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DEALLOC_MW,
			ib_uverbs_dealloc_mw,
			UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_mw),
			UAPI_DEF_METHOD_NEEDS_FN(dealloc_mw))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_PD,
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_ALLOC_PD,
			ib_uverbs_alloc_pd,
			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_pd,
						struct ib_uverbs_alloc_pd_resp),
			UAPI_DEF_METHOD_NEEDS_FN(alloc_pd)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DEALLOC_PD,
			ib_uverbs_dealloc_pd,
			UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_pd),
			UAPI_DEF_METHOD_NEEDS_FN(dealloc_pd))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_QP,
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_ATTACH_MCAST,
			ib_uverbs_attach_mcast,
			UAPI_DEF_WRITE_I(struct ib_uverbs_attach_mcast),
			UAPI_DEF_METHOD_NEEDS_FN(attach_mcast),
			UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_QP,
				     ib_uverbs_create_qp,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_create_qp,
					     struct ib_uverbs_create_qp_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DESTROY_QP,
			ib_uverbs_destroy_qp,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_qp,
					  struct ib_uverbs_destroy_qp_resp),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_qp)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DETACH_MCAST,
			ib_uverbs_detach_mcast,
			UAPI_DEF_WRITE_I(struct ib_uverbs_detach_mcast),
			UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_MODIFY_QP,
			ib_uverbs_modify_qp,
			UAPI_DEF_WRITE_I(struct ib_uverbs_modify_qp),
			UAPI_DEF_METHOD_NEEDS_FN(modify_qp)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_POST_RECV,
			ib_uverbs_post_recv,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_post_recv,
					  struct ib_uverbs_post_recv_resp),
			UAPI_DEF_METHOD_NEEDS_FN(post_recv)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_POST_SEND,
			ib_uverbs_post_send,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_post_send,
					  struct ib_uverbs_post_send_resp),
			UAPI_DEF_METHOD_NEEDS_FN(post_send)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_QUERY_QP,
			ib_uverbs_query_qp,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_qp,
					  struct ib_uverbs_query_qp_resp),
			UAPI_DEF_METHOD_NEEDS_FN(query_qp)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_CREATE_QP,
			ib_uverbs_ex_create_qp,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_qp,
					     comp_mask,
					     struct ib_uverbs_ex_create_qp_resp,
					     response_length),
			UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_MODIFY_QP,
			ib_uverbs_ex_modify_qp,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_modify_qp,
					     base,
					     struct ib_uverbs_ex_modify_qp_resp,
					     response_length),
			UAPI_DEF_METHOD_NEEDS_FN(modify_qp))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_RWQ_IND_TBL,
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL,
			ib_uverbs_ex_create_rwq_ind_table,
			UAPI_DEF_WRITE_IO_EX(
				struct ib_uverbs_ex_create_rwq_ind_table,
				log_ind_tbl_size,
				struct ib_uverbs_ex_create_rwq_ind_table_resp,
				ind_tbl_num),
			UAPI_DEF_METHOD_NEEDS_FN(create_rwq_ind_table)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
			ib_uverbs_ex_destroy_rwq_ind_table,
			UAPI_DEF_WRITE_I(
				struct ib_uverbs_ex_destroy_rwq_ind_table),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_rwq_ind_table))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_WQ,
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_CREATE_WQ,
			ib_uverbs_ex_create_wq,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_wq,
					     max_sge,
					     struct ib_uverbs_ex_create_wq_resp,
					     wqn),
			UAPI_DEF_METHOD_NEEDS_FN(create_wq)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_DESTROY_WQ,
			ib_uverbs_ex_destroy_wq,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_destroy_wq,
					     wq_handle,
					     struct ib_uverbs_ex_destroy_wq_resp,
					     reserved),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_wq)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_MODIFY_WQ,
			ib_uverbs_ex_modify_wq,
			UAPI_DEF_WRITE_I_EX(struct ib_uverbs_ex_modify_wq,
					    curr_wq_state),
			UAPI_DEF_METHOD_NEEDS_FN(modify_wq))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_SRQ,
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_SRQ,
				     ib_uverbs_create_srq,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_create_srq,
					     struct ib_uverbs_create_srq_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_XSRQ,
				     ib_uverbs_create_xsrq,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_create_xsrq,
					     struct ib_uverbs_create_srq_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DESTROY_SRQ,
			ib_uverbs_destroy_srq,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_srq,
					  struct ib_uverbs_destroy_srq_resp),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_srq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_MODIFY_SRQ,
			ib_uverbs_modify_srq,
			UAPI_DEF_WRITE_UDATA_I(struct ib_uverbs_modify_srq),
			UAPI_DEF_METHOD_NEEDS_FN(modify_srq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_POST_SRQ_RECV,
			ib_uverbs_post_srq_recv,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_post_srq_recv,
					  struct ib_uverbs_post_srq_recv_resp),
			UAPI_DEF_METHOD_NEEDS_FN(post_srq_recv)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_QUERY_SRQ,
			ib_uverbs_query_srq,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_srq,
					  struct ib_uverbs_query_srq_resp),
			UAPI_DEF_METHOD_NEEDS_FN(query_srq))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_XRCD,
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_CLOSE_XRCD,
			ib_uverbs_close_xrcd,
			UAPI_DEF_WRITE_I(struct ib_uverbs_close_xrcd),
			UAPI_DEF_METHOD_NEEDS_FN(dealloc_xrcd)),
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_QP,
				     ib_uverbs_open_qp,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_open_qp,
					     struct ib_uverbs_create_qp_resp)),
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_XRCD,
				     ib_uverbs_open_xrcd,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_open_xrcd,
					     struct ib_uverbs_open_xrcd_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(alloc_xrcd))),

	{},
};