/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

/*
 * Copy a response to userspace. If the provided 'resp' is larger than the
 * user buffer it is silently truncated. If the user provided a larger buffer
 * then the trailing portion is zero filled.
 *
 * These semantics are intended to support future extension of the output
 * structures.
 */
static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp,
			   size_t resp_len)
{
	u8 __user *cur = attrs->ucore.outbuf + resp_len;
	u8 __user *end = attrs->ucore.outbuf + attrs->ucore.outlen;
	int ret;

	if (copy_to_user(attrs->ucore.outbuf, resp,
			 min(attrs->ucore.outlen, resp_len)))
		return -EFAULT;

	/* Zero fill any extra memory that user space might have provided */
	for (; cur < end; cur++) {
		ret = put_user(0, cur);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Copy a request from userspace. If the provided 'req' is larger than the
 * user buffer then the user buffer is zero extended into the 'req'. If 'req'
 * is smaller than the user buffer then the uncopied bytes in the user buffer
 * must be zero.
 */
static int uverbs_request(struct uverbs_attr_bundle *attrs, void *req,
			  size_t req_len)
{
	if (copy_from_user(req, attrs->ucore.inbuf,
			   min(attrs->ucore.inlen, req_len)))
		return -EFAULT;

	if (attrs->ucore.inlen < req_len) {
		memset(req + attrs->ucore.inlen, 0,
		       req_len - attrs->ucore.inlen);
	} else if (attrs->ucore.inlen > req_len) {
		if (!ib_is_buffer_cleared(attrs->ucore.inbuf + req_len,
					  attrs->ucore.inlen - req_len))
			return -EOPNOTSUPP;
	}
	return 0;
}
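
/*
 * Illustrative sketch only (not a registered handler): a typical write
 * method pairs the two helpers above, so both older (smaller) and newer
 * (larger) userspace structures keep working. All names are hypothetical:
 *
 *	static int ib_uverbs_example(struct uverbs_attr_bundle *attrs)
 *	{
 *		struct ib_uverbs_example_cmd  cmd;
 *		struct ib_uverbs_example_resp resp = {};
 *		int ret;
 *
 *		ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 *		if (ret)
 *			return ret;
 *		... execute the command and fill resp ...
 *		return uverbs_response(attrs, &resp, sizeof(resp));
 *	}
 */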

/*
 * Generate the value for the 'response_length' protocol used by write_ex.
 * This is the number of bytes the kernel actually wrote. Userspace can use
 * this to detect what structure members in the response the kernel
 * understood.
 */
static u32 uverbs_response_length(struct uverbs_attr_bundle *attrs,
				  size_t resp_len)
{
	return min_t(size_t, attrs->ucore.outlen, resp_len);
}
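
/*
 * For example (illustrative sizes): with a 24 byte kernel response struct,
 * a legacy 16 byte user buffer yields a response_length of 16, while a
 * 32 byte buffer yields 24, the full size this kernel actually wrote.
 */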

/*
 * The iterator version of the request interface is for handlers that need to
 * step over a flex array at the end of a command header.
 */
struct uverbs_req_iter {
	const void __user *cur;
	const void __user *end;
};

static int uverbs_request_start(struct uverbs_attr_bundle *attrs,
				struct uverbs_req_iter *iter,
				void *req,
				size_t req_len)
{
	if (attrs->ucore.inlen < req_len)
		return -ENOSPC;

	if (copy_from_user(req, attrs->ucore.inbuf, req_len))
		return -EFAULT;

	iter->cur = attrs->ucore.inbuf + req_len;
	iter->end = attrs->ucore.inbuf + attrs->ucore.inlen;
	return 0;
}

static int uverbs_request_next(struct uverbs_req_iter *iter, void *val,
			       size_t len)
{
	if (iter->cur + len > iter->end)
		return -ENOSPC;

	if (copy_from_user(val, iter->cur, len))
		return -EFAULT;

	iter->cur += len;
	return 0;
}

static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
						  size_t len)
{
	const void __user *res = iter->cur;

	if (iter->cur + len > iter->end)
		return ERR_PTR(-ENOSPC);
	iter->cur += len;
	return res;
}

static int uverbs_request_finish(struct uverbs_req_iter *iter)
{
	if (!ib_is_buffer_cleared(iter->cur, iter->end - iter->cur))
		return -EOPNOTSUPP;
	return 0;
}
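
/*
 * Sketch of the intended calling pattern for the iterator (illustrative,
 * error handling elided; the "wqe" names are hypothetical): read the fixed
 * header, step over each element of the trailing flex array, then insist
 * that the rest of the input is zero.
 *
 *	uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
 *	for (i = 0; i < cmd.wqe_count; i++)
 *		uverbs_request_next(&iter, &wqe, sizeof(wqe));
 *	return uverbs_request_finish(&iter);
 */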

static struct ib_uverbs_completion_event_file *
_ib_uverbs_lookup_comp_file(s32 fd, const struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = ufd_get_read(UVERBS_OBJECT_COMP_CHANNEL,
					       fd, attrs);

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	return container_of(uobj, struct ib_uverbs_completion_event_file,
			    uobj);
}

/*
 * typecheck() evaluates to 1 after asserting that its argument is an s32,
 * so the multiplication below is a compile-time type check with no runtime
 * effect.
 */
#define ib_uverbs_lookup_comp_file(_fd, _ufile)                                \
	_ib_uverbs_lookup_comp_file((_fd)*typecheck(s32, _fd), _ufile)

static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs,
				 const char __user *buf, int in_len,
				 int out_len)
{
	struct ib_uverbs_file *file = attrs->ufile;
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_ucontext		 *ucontext;
	struct file			 *filp;
	struct ib_rdmacg_object		 cg_obj;
	struct ib_device *ib_dev;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	mutex_lock(&file->ucontext_lock);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto err;
	}

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &attrs->driver_udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;

	ucontext->closing = false;
	ucontext->cleanup_retryable = false;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	mutex_init(&ucontext->per_mm_list_lock);
	INIT_LIST_HEAD(&ucontext->per_mm_list);
	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_file;

	fd_install(resp.async_fd, filp);

	/*
	 * Make sure that ib_uverbs_get_ucontext() sees the pointer update
	 * only after all writes to setup the ucontext have completed
	 */
	smp_store_release(&file->ucontext, ucontext);

	mutex_unlock(&file->ucontext_lock);

	return 0;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->ucontext_lock);
	return ret;
}

static void copy_query_dev_fields(struct ib_ucontext *ucontext,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	struct ib_device *ib_dev = ucontext->device;

	resp->fw_ver		= attr->fw_ver;
	resp->node_guid		= ib_dev->node_guid;
	resp->sys_image_guid	= attr->sys_image_guid;
	resp->max_mr_size	= attr->max_mr_size;
	resp->page_size_cap	= attr->page_size_cap;
	resp->vendor_id		= attr->vendor_id;
	resp->vendor_part_id	= attr->vendor_part_id;
	resp->hw_ver		= attr->hw_ver;
	resp->max_qp		= attr->max_qp;
	resp->max_qp_wr		= attr->max_qp_wr;
	resp->device_cap_flags	= lower_32_bits(attr->device_cap_flags);
	resp->max_sge		= min(attr->max_send_sge, attr->max_recv_sge);
	resp->max_sge_rd	= attr->max_sge_rd;
	resp->max_cq		= attr->max_cq;
	resp->max_cqe		= attr->max_cqe;
	resp->max_mr		= attr->max_mr;
	resp->max_pd		= attr->max_pd;
	resp->max_qp_rd_atom	= attr->max_qp_rd_atom;
	resp->max_ee_rd_atom	= attr->max_ee_rd_atom;
	resp->max_res_rd_atom	= attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom	= attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom	= attr->max_ee_init_rd_atom;
	resp->atomic_cap		= attr->atomic_cap;
	resp->max_ee			= attr->max_ee;
	resp->max_rdd			= attr->max_rdd;
	resp->max_mw			= attr->max_mw;
	resp->max_raw_ipv6_qp		= attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp		= attr->max_raw_ethy_qp;
	resp->max_mcast_grp		= attr->max_mcast_grp;
	resp->max_mcast_qp_attach	= attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach	= attr->max_total_mcast_qp_attach;
	resp->max_ah			= attr->max_ah;
	resp->max_fmr			= attr->max_fmr;
	resp->max_map_per_fmr		= attr->max_map_per_fmr;
	resp->max_srq			= attr->max_srq;
	resp->max_srq_wr		= attr->max_srq_wr;
	resp->max_srq_sge		= attr->max_srq_sge;
	resp->max_pkeys			= attr->max_pkeys;
	resp->local_ca_ack_delay	= attr->local_ca_ack_delay;
	resp->phys_port_cnt		= ib_dev->phys_port_cnt;
}

static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs,
				  const char __user *buf, int in_len,
				  int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_ucontext *ucontext;
	int ret;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(ucontext, &resp, &ucontext->device->attrs);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

/*
 * ib_uverbs_query_port_resp.port_cap_flags started out as just a copy of the
 * PortInfo CapabilityMask, but was extended with unique bits.
 */
static u32 make_port_cap_flags(const struct ib_port_attr *attr)
{
	u32 res;

	/* All IBA CapabilityMask bits are passed through here, except bit 26,
	 * which is overridden with IP_BASED_GIDS. This is due to a historical
	 * mistake in the implementation of IP_BASED_GIDS. Otherwise all other
	 * bits match the IBA definition across all kernel versions.
	 */
	res = attr->port_cap_flags & ~(u32)IB_UVERBS_PCF_IP_BASED_GIDS;

	if (attr->ip_gids)
		res |= IB_UVERBS_PCF_IP_BASED_GIDS;

	return res;
}

static int ib_uverbs_query_port(struct uverbs_attr_bundle *attrs,
				const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;
	struct ib_ucontext *ucontext;
	struct ib_device *ib_dev;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);
	ib_dev = ucontext->device;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state	     = attr.state;
	resp.max_mtu	     = attr.max_mtu;
	resp.active_mtu      = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = make_port_cap_flags(&attr);
	resp.max_msg_sz      = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;

	if (rdma_is_grh_required(ib_dev, cmd.port_num))
		resp.flags |= IB_UVERBS_QPF_GRH_REQUIRED;

	if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
		/* OPA LIDs are 32 bits wide; fold to the 16-bit resp fields */
		resp.lid     = OPA_TO_IB_UCAST_LID(attr.lid);
		resp.sm_lid  = OPA_TO_IB_UCAST_LID(attr.sm_lid);
	} else {
		resp.lid     = ib_lid_cpu16(attr.lid);
		resp.sm_lid  = ib_lid_cpu16(attr.sm_lid);
	}
	resp.lmc	     = attr.lmc;
	resp.max_vl_num      = attr.max_vl_num;
	resp.sm_sl	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;
	resp.link_layer      = rdma_port_get_link_layer(ib_dev,
							cmd.port_num);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_PD, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, uobj->context, &attrs->driver_udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;
	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_add(&pd->res);

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	return uobj_alloc_commit(uobj);

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

static int ib_uverbs_dealloc_pd(struct uverbs_attr_bundle *attrs,
				const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
}

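/*
 * XRC domains are shared between processes by keying them off an inode:
 * every process that opens the same file and passes its descriptor to the
 * open_xrcd command receives the same underlying ib_xrcd. The rb-tree
 * below implements that inode -> xrcd lookup.
 */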
struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			    struct inode *inode,
			    struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs,
			       const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_device *ibudev = attrs->ufile->device;
	struct ib_uverbs_open_xrcd	cmd;
	struct ib_uverbs_open_xrcd_resp	resp;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd			f = {NULL, 0};
	struct inode                   *inode = NULL;
	int				ret = 0;
	int				new_xrcd = 0;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	mutex_lock(&ibudev->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(ibudev, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, attrs,
						   &ib_dev);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, obj->uobject.context,
					  &attrs->driver_udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode   = inode;
		xrcd->device  = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(ibudev, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	if (f.file)
		fdput(f);

	mutex_unlock(&ibudev->xrcd_tree_mutex);

	return uobj_alloc_commit(&obj->uobject);

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(ibudev, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&ibudev->xrcd_tree_mutex);

	return ret;
}

static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs,
				const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs);
}

int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;
	struct ib_uverbs_device *dev = uobject->context->ufile->device;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (ib_is_destroy_retryable(ret, why, uobject)) {
		atomic_inc(&xrcd->usecnt);
		return ret;
	}

	if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}

static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs,
			    const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	/* The requested I/O virtual address must carry the same offset
	 * within a page as the start of the userspace buffer.
	 */
	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &attrs->driver_udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->dm	    = NULL;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	mr->res.type = RDMA_RESTRACK_MR;
	rdma_restrack_add(&mr->res);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	uobj_put_obj_read(pd);

	return uobj_alloc_commit(uobj);

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_rereg_mr      cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_pd                *pd = NULL;
	struct ib_mr                *mr;
	struct ib_pd		    *old_pd;
	int                          ret;
	struct ib_uobject	    *uobj;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
			return -EINVAL;

	uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (mr->dm) {
		ret = -EINVAL;
		goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
				       attrs);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start, cmd.length,
					cmd.hca_va, cmd.access_flags, pd,
					&attrs->driver_udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;

	ret = uverbs_response(attrs, &resp, sizeof(resp));

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}

static int ib_uverbs_dereg_mr(struct uverbs_attr_bundle *attrs,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
}

static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	int                            ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_MW, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	uobj_put_obj_read(pd);
	return uobj_alloc_commit(uobj);

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

static int ib_uverbs_dealloc_mw(struct uverbs_attr_bundle *attrs,
				const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_MW, cmd.mw_handle, attrs);
}

static int ib_uverbs_create_comp_channel(struct uverbs_attr_bundle *attrs,
					 const char __user *buf, int in_len,
					 int out_len)
{
	struct ib_uverbs_create_comp_channel	   cmd;
	struct ib_uverbs_create_comp_channel_resp  resp;
	struct ib_uobject			  *uobj;
	struct ib_uverbs_completion_event_file	  *ev_file;
	struct ib_device *ib_dev;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret) {
		uobj_alloc_abort(uobj);
		return ret;
	}

	return uobj_alloc_commit(uobj);
}

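/*
 * Common implementation behind both the plain and extended create_cq
 * commands. cmd_sz says how much of ib_uverbs_ex_create_cq the caller
 * actually supplied, so extended-only fields such as flags are consulted
 * only when they were really provided.
 */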
static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz)
{
	struct ib_ucq_object           *obj;
	struct ib_uverbs_completion_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};
	struct ib_device *ib_dev;

	if (cmd->comp_vector >= attrs->ufile->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, attrs);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uobject.user_handle = cmd->user_handle;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context,
			       &attrs->driver_udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe       = cq->cqe;
	resp.response_length = uverbs_response_length(attrs, sizeof(resp));

	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_add(&cq->res);

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_cb;

	ret = uobj_alloc_commit(&obj->uobject);
	if (ret)
		return ERR_PTR(ret);
	return obj;

err_cb:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(attrs->ufile, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq(struct uverbs_attr_bundle *attrs,
			       const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_ex_create_cq	cmd_ex;
	struct ib_ucq_object           *obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(attrs, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
				sizeof(cmd.comp_channel));
	return PTR_ERR_OR_ZERO(obj);
}

static int ib_uverbs_ex_create_cq(struct uverbs_attr_bundle *attrs,
				  struct ib_udata *ucore)
{
	struct ib_uverbs_ex_create_cq  cmd;
	struct ib_ucq_object           *obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	obj = create_cq(attrs, &cmd, min(ucore->inlen, sizeof(cmd)));
	return PTR_ERR_OR_ZERO(obj);
}

static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs,
			       const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_resize_cq	cmd;
	struct ib_uverbs_resize_cq_resp	resp = {};
	struct ib_cq			*cq;
	int				ret = -EINVAL;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &attrs->driver_udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
out:
	uobj_put_obj_read(cq);

	return ret;
}

static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
			   struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id		= wc->wr_id;
	tmp.status		= wc->status;
	tmp.opcode		= wc->opcode;
	tmp.vendor_err		= wc->vendor_err;
	tmp.byte_len		= wc->byte_len;
	tmp.ex.imm_data		= wc->ex.imm_data;
	tmp.qp_num		= wc->qp->qp_num;
	tmp.src_qp		= wc->src_qp;
	tmp.wc_flags		= wc->wc_flags;
	tmp.pkey_index		= wc->pkey_index;
	if (rdma_cap_opa_ah(ib_dev, wc->port_num))
		tmp.slid	= OPA_TO_IB_UCAST_LID(wc->slid);
	else
		tmp.slid	= ib_lid_cpu16(wc->slid);
	tmp.sl			= wc->sl;
	tmp.dlid_path_bits	= wc->dlid_path_bits;
	tmp.port_num		= wc->port_num;
	tmp.reserved		= 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp  resp;
	u8 __user                     *header_ptr;
	u8 __user                     *data_ptr;
	struct ib_cq                  *cq;
	struct ib_wc                   wc;
	int                            ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = attrs->ucore.outbuf;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(cq->device, data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = 0;

out_put:
	uobj_put_obj_read(cq);
	return ret;
}

static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs,
				   const char __user *buf, int in_len,
				   int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);

	return 0;
}

static int ib_uverbs_destroy_cq(struct uverbs_attr_bundle *attrs,
				const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject		*uobj;
	struct ib_ucq_object		*obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_ucq_object, uobject);
	memset(&resp, 0, sizeof(resp));
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uobj_put_destroy(uobj);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

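/*
 * Shared by the plain and extended create_qp commands. Depending on
 * qp_type and comp_mask this resolves different uobjects: an XRCD for
 * XRC_TGT (carried in pd_handle), an RX indirection table for RSS QPs,
 * and otherwise a PD plus send/recv CQs and an optional SRQ.
 */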
static int create_qp(struct uverbs_attr_bundle *attrs,
		     struct ib_uverbs_ex_create_qp *cmd, size_t cmd_sz)
{
	struct ib_uqp_object		*obj;
	struct ib_device		*device;
	struct ib_pd			*pd = NULL;
	struct ib_xrcd			*xrcd = NULL;
	struct ib_uobject		*xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq			*scq = NULL, *rcq = NULL;
	struct ib_srq			*srq = NULL;
	struct ib_qp			*qp;
	char				*buf;
	struct ib_qp_init_attr		attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int				ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;
	struct ib_device *ib_dev;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
		      (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table,
					    UVERBS_OBJECT_RWQ_IND_TBL,
					    cmd->rwq_ind_tbl_handle, attrs);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
					  attrs);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
							cmd->srq_handle, attrs);
				if (!srq || srq->srq_type == IB_SRQT_XRC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(
						cq, UVERBS_OBJECT_CQ,
						cmd->recv_cq_handle, attrs);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
						cmd->send_cq_handle, attrs);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
				       attrs);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = attrs->ufile;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd	   = xrcd;
	attr.sq_sig_type   = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					      IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd->qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd->max_send_wr;
	attr.cap.max_recv_wr     = cmd->max_recv_wr;
	attr.cap.max_send_sge    = cmd->max_send_sge;
	attr.cap.max_recv_sge    = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				IB_QP_CREATE_CROSS_CHANNEL |
				IB_QP_CREATE_MANAGED_SEND |
				IB_QP_CREATE_MANAGED_RECV |
				IB_QP_CREATE_SCATTER_FCS |
				IB_QP_CREATE_CVLAN_STRIPPING |
				IB_QP_CREATE_SOURCE_QPN |
				IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			goto err_put;
		}

		attr.source_qpn = cmd->source_qpn;
	}

	/* Any request bytes beyond the command struct this kernel knows
	 * about must be zero, otherwise the request is rejected.
	 */
	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
				   &obj->uevent.uobject);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		qp->real_qp	  = qp;
		qp->pd		  = pd;
		qp->send_cq	  = attr.send_cq;
		qp->recv_cq	  = attr.recv_cq;
		qp->srq		  = attr.srq;
		qp->rwq_ind_tbl	  = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context	  = attr.qp_context;
		qp->qp_type	  = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		qp->port = 0;
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	} else {
		/* It is done in _ib_create_qp for other QP types */
		qp->uobject = &obj->uevent.uobject;
	}

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn             = qp->qp_num;
	resp.base.qp_handle       = obj->uevent.uobject.id;
	resp.base.max_recv_sge    = attr.cap.max_recv_sge;
	resp.base.max_send_sge    = attr.cap.max_send_sge;
	resp.base.max_recv_wr     = attr.cap.max_recv_wr;
	resp.base.max_send_wr     = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;
	resp.response_length = uverbs_response_length(attrs, sizeof(resp));

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	return uobj_alloc_commit(&obj->uevent.uobject);
err_cb:
	ib_destroy_qp(qp);

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

static int ib_uverbs_create_qp(struct uverbs_attr_bundle *attrs,
			       const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_ex_create_qp	cmd_ex;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	return create_qp(attrs, &cmd_ex,
			 offsetof(typeof(cmd_ex), is_srq) + sizeof(cmd.is_srq));
}

static int ib_uverbs_ex_create_qp(struct uverbs_attr_bundle *attrs,
				  struct ib_udata *ucore)
{
	struct ib_uverbs_ex_create_qp cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	return create_qp(attrs, &cmd, min(ucore->inlen, sizeof(cmd)));
}

static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_uqp_object           *obj;
	struct ib_xrcd		       *xrcd;
	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, attrs);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = attrs->ufile;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_destroy;

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	return uobj_alloc_commit(&obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
				   struct rdma_ah_attr *rdma_attr)
{
	const struct ib_global_route   *grh;

	uverb_attr->dlid              = rdma_ah_get_dlid(rdma_attr);
	uverb_attr->sl                = rdma_ah_get_sl(rdma_attr);
	uverb_attr->src_path_bits     = rdma_ah_get_path_bits(rdma_attr);
	uverb_attr->static_rate       = rdma_ah_get_static_rate(rdma_attr);
	uverb_attr->is_global         = !!(rdma_ah_get_ah_flags(rdma_attr) &
					 IB_AH_GRH);
	if (uverb_attr->is_global) {
		grh = rdma_ah_read_grh(rdma_attr);
		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
		uverb_attr->flow_label        = grh->flow_label;
		uverb_attr->sgid_index        = grh->sgid_index;
		uverb_attr->hop_limit         = grh->hop_limit;
		uverb_attr->traffic_class     = grh->traffic_class;
	}
	uverb_attr->port_num          = rdma_ah_get_port_num(rdma_attr);
}

static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                   *qp;
	struct ib_qp_attr              *attr;
	struct ib_qp_init_attr         *init_attr;
	int                            ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	uobj_put_obj_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state               = attr->qp_state;
	resp.cur_qp_state           = attr->cur_qp_state;
	resp.path_mtu               = attr->path_mtu;
	resp.path_mig_state         = attr->path_mig_state;
	resp.qkey                   = attr->qkey;
	resp.rq_psn                 = attr->rq_psn;
	resp.sq_psn                 = attr->sq_psn;
	resp.dest_qp_num            = attr->dest_qp_num;
	resp.qp_access_flags        = attr->qp_access_flags;
	resp.pkey_index             = attr->pkey_index;
	resp.alt_pkey_index         = attr->alt_pkey_index;
	resp.sq_draining            = attr->sq_draining;
	resp.max_rd_atomic          = attr->max_rd_atomic;
	resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
	resp.min_rnr_timer          = attr->min_rnr_timer;
	resp.port_num               = attr->port_num;
	resp.timeout                = attr->timeout;
	resp.retry_cnt              = attr->retry_cnt;
	resp.rnr_retry              = attr->rnr_retry;
	resp.alt_port_num           = attr->alt_port_num;
	resp.alt_timeout            = attr->alt_timeout;

	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);

	resp.max_send_wr            = init_attr->cap.max_send_wr;
	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
	resp.max_send_sge           = init_attr->cap.max_send_sge;
	resp.max_recv_sge           = init_attr->cap.max_recv_sge;
	resp.max_inline_data        = init_attr->cap.max_inline_data;
	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	ret = uverbs_response(attrs, &resp, sizeof(resp));

out:
	kfree(attr);
	kfree(init_attr);

	return ret;
}

/*
 * Remove ignored fields set in the attribute mask. For example, an XRC
 * initiator has no responder resources, so responder-only attributes are
 * silently dropped rather than rejected.
 */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

static void copy_ah_attr_from_uverbs(struct ib_device *dev,
				     struct rdma_ah_attr *rdma_attr,
				     struct ib_uverbs_qp_dest *uverb_attr)
{
	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
	if (uverb_attr->is_global) {
		rdma_ah_set_grh(rdma_attr, NULL,
				uverb_attr->flow_label,
				uverb_attr->sgid_index,
				uverb_attr->hop_limit,
				uverb_attr->traffic_class);
		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
	} else {
		rdma_ah_set_ah_flags(rdma_attr, 0);
	}
	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
	rdma_ah_set_make_grd(rdma_attr, false);
}

1832
static int modify_qp(struct uverbs_attr_bundle *attrs,
1833
		     struct ib_uverbs_ex_modify_qp *cmd)
1834
{
1835 1836 1837
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	int ret;
1838

1839
	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
1840 1841 1842
	if (!attr)
		return -ENOMEM;

1843 1844
	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle,
			       attrs);
1845
	if (!qp) {
1846 1847 1848 1849
		ret = -EINVAL;
		goto out;
	}

1850 1851
	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
1852 1853 1854 1855
		ret = -EINVAL;
		goto release_qp;
	}

1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907
	if ((cmd->base.attr_mask & IB_QP_AV)) {
		if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
			ret = -EINVAL;
			goto release_qp;
		}

		if (cmd->base.attr_mask & IB_QP_STATE &&
		    cmd->base.qp_state == IB_QPS_RTR) {
		/* We are in INIT->RTR TRANSITION (if we are not,
		 * this transition will be rejected in subsequent checks).
		 * In the INIT->RTR transition, we cannot have IB_QP_PORT set,
		 * but the IB_QP_STATE flag is required.
		 *
		 * Since kernel 3.14 (commit dbf727de7440), the uverbs driver,
		 * when IB_QP_AV is set, has required inclusion of a valid
		 * port number in the primary AV. (AVs are created and handled
		 * differently for infiniband and ethernet (RoCE) ports).
		 *
		 * Check the port number included in the primary AV against
		 * the port number in the qp struct, which was set (and saved)
		 * in the RST->INIT transition.
		 */
			if (cmd->base.dest.port_num != qp->real_qp->port) {
				ret = -EINVAL;
				goto release_qp;
			}
		} else {
		/* We are in SQD->SQD. (If we are not, this transition will
		 * be rejected later in the verbs layer checks).
		 * Check for both IB_QP_PORT and IB_QP_AV, these can be set
		 * together in the SQD->SQD transition.
		 *
		 * If only IB_QP_AV was set, add in IB_QP_PORT as well (the
		 * verbs layer driver does not track primary port changes
		 * resulting from path migration. Thus, in SQD, if the primary
		 * AV is modified, the primary port should also be modified).
		 *
		 * Note that in this transition, the IB_QP_STATE flag
		 * is not allowed.
		 */
			if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
			     == (IB_QP_AV | IB_QP_PORT)) &&
			    cmd->base.port_num != cmd->base.dest.port_num) {
				ret = -EINVAL;
				goto release_qp;
			}
			if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
			    == IB_QP_AV) {
				cmd->base.attr_mask |= IB_QP_PORT;
				cmd->base.port_num = cmd->base.dest.port_num;
			}
		}
	}

	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
	    !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
	    cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
	    cmd->base.cur_qp_state > IB_QPS_ERR) ||
	    (cmd->base.attr_mask & IB_QP_STATE &&
	    cmd->base.qp_state > IB_QPS_ERR)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if (cmd->base.attr_mask & IB_QP_STATE)
		attr->qp_state = cmd->base.qp_state;
	if (cmd->base.attr_mask & IB_QP_CUR_STATE)
		attr->cur_qp_state = cmd->base.cur_qp_state;
	if (cmd->base.attr_mask & IB_QP_PATH_MTU)
		attr->path_mtu = cmd->base.path_mtu;
	if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
		attr->path_mig_state = cmd->base.path_mig_state;
	if (cmd->base.attr_mask & IB_QP_QKEY)
		attr->qkey = cmd->base.qkey;
	if (cmd->base.attr_mask & IB_QP_RQ_PSN)
		attr->rq_psn = cmd->base.rq_psn;
	if (cmd->base.attr_mask & IB_QP_SQ_PSN)
		attr->sq_psn = cmd->base.sq_psn;
	if (cmd->base.attr_mask & IB_QP_DEST_QPN)
		attr->dest_qp_num = cmd->base.dest_qp_num;
	if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
		attr->qp_access_flags = cmd->base.qp_access_flags;
	if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
		attr->pkey_index = cmd->base.pkey_index;
	if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		attr->max_rd_atomic = cmd->base.max_rd_atomic;
	if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
		attr->min_rnr_timer = cmd->base.min_rnr_timer;
	if (cmd->base.attr_mask & IB_QP_PORT)
		attr->port_num = cmd->base.port_num;
	if (cmd->base.attr_mask & IB_QP_TIMEOUT)
		attr->timeout = cmd->base.timeout;
	if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
		attr->retry_cnt = cmd->base.retry_cnt;
	if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
		attr->rnr_retry = cmd->base.rnr_retry;
	if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
		attr->alt_port_num = cmd->base.alt_port_num;
		attr->alt_timeout = cmd->base.alt_timeout;
		attr->alt_pkey_index = cmd->base.alt_pkey_index;
	}
	if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
		attr->rate_limit = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
					 &cmd->base.dest);

	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
					 &cmd->base.alt_dest);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      &attrs->driver_udata);

release_qp:
	uobj_put_obj_read(qp);
out:
	kfree(attr);

	return ret;
}

static int ib_uverbs_modify_qp(struct uverbs_attr_bundle *attrs,
			       const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_ex_modify_qp cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd.base, sizeof(cmd.base));
	if (ret)
		return ret;

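	/*
	 * (IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1 sets every bit up
	 * to and including the last attribute the legacy command knows
	 * about (e.g. if that bit were 1 << 20, the mask would be
	 * 0x1fffff); any higher bit is extended-only and rejected here.
	 */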
	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	return modify_qp(attrs, &cmd);
}

static int ib_uverbs_ex_modify_qp(struct uverbs_attr_bundle *attrs,
				  struct ib_udata *ucore)
{
	struct ib_uverbs_ex_modify_qp cmd;
	struct ib_uverbs_ex_modify_qp_resp resp = {
		.response_length = uverbs_response_length(attrs, sizeof(resp))
	};
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	ret = modify_qp(attrs, &cmd);
	if (ret)
		return ret;

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs,
				const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject		*uobj;
	struct ib_uqp_object        	*obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_get_destroy(UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	memset(&resp, 0, sizeof(resp));
	resp.events_reported = obj->uevent.events_reported;

	uobj_put_destroy(uobj);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

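/*
 * Allocate a send work request with its scatter/gather array placed
 * right after the ib_sge-aligned WR structure, so a single kfree()
 * releases both.  The guard below rejects num_sge values that would
 * overflow the size computation passed to kmalloc().
 */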
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
		       sizeof (struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
			 num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}

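/*
 * A post_send request carries, after the fixed header, cmd.wr_count
 * work requests of cmd.wqe_size bytes each, followed by cmd.sge_count
 * struct ib_uverbs_sge entries:
 *
 *   struct ib_uverbs_post_send cmd;
 *   u8                         wqes[cmd.wr_count * cmd.wqe_size];
 *   struct ib_uverbs_sge       sgls[cmd.sge_count];
 *
 * The request iterator below maps both variable-length regions and
 * fails unless the advertised sizes consume the input exactly.
 */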
static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs,
			       const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next;
	const struct ib_send_wr	       *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int				is_ud;
	int ret, ret2;
	size_t                          next_size;
	const struct ib_sge __user *sgls;
	const void __user *wqes;
	struct uverbs_req_iter iter;

	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (ret)
		return ret;
	wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
	if (IS_ERR(wqes))
		return PTR_ERR(wqes);
	sgls = uverbs_request_next_ptr(
		&iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
	if (IS_ERR(sgls))
		return PTR_ERR(sgls);
	ret = uverbs_request_finish(&iter);
	if (ret)
		return ret;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr, wqes + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
						   user_wr->wr.ud.ah, attrs);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list, sgls + sg_ind,
					   next->num_sge *
						   sizeof(struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	ret2 = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret2)
		ret = ret2;

out_put:
	uobj_put_obj_read(qp);

	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			uobj_put_obj_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret;
}

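/*
 * Unmarshal the variable-length tail of a post_recv/post_srq_recv
 * request into a kernel linked list of ib_recv_wr.  The wire layout
 * mirrors post_send: wr_count receive WRs of wqe_size bytes, then
 * sge_count SGEs.  On error every WR allocated so far is freed and an
 * ERR_PTR is returned.
 */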
static struct ib_recv_wr *
ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
			  u32 wqe_size, u32 sge_count)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;
	const struct ib_sge __user *sgls;
	const void __user *wqes;

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
	if (IS_ERR(wqes))
		return ERR_CAST(wqes);
	sgls = uverbs_request_next_ptr(
		iter, sge_count * sizeof(struct ib_uverbs_sge));
	if (IS_ERR(sgls))
		return ERR_CAST(sgls);
	ret = uverbs_request_finish(iter);
	if (ret)
		return ERR_PTR(ret);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, wqes + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		if (user_wr->num_sge >=
		    (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
		    sizeof (struct ib_sge)) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list, sgls + sg_ind,
					   next->num_sge *
						   sizeof(struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs,
			       const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next;
	const struct ib_recv_wr	       *bad_wr;
	struct ib_qp                   *qp;
	int ret, ret2;
	struct uverbs_req_iter iter;

	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
				       cmd.sge_count);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	uobj_put_obj_read(qp);
	if (ret) {
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}
	}

	ret2 = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret2)
		ret = ret2;
out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret;
}

static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs,
				   const char __user *buf, int in_len,
				   int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next;
	const struct ib_recv_wr		   *bad_wr;
	struct ib_srq                      *srq;
	int ret, ret2;
	struct uverbs_req_iter iter;

	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
				       cmd.sge_count);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
	if (!srq) {
		ret = -EINVAL;
		goto out;
	}

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	uobj_put_obj_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	ret2 = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret2)
		ret = ret2;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret;
}

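/*
 * Create an address handle: allocate the uobject, build an
 * rdma_ah_attr from the user-supplied attributes (with a GRH only when
 * is_global is set), and commit the new handle only after the response
 * has been copied back to userspace.
 */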
static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs,
			       const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct rdma_ah_attr		attr = {};
	int ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_AH, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) {
		ret = -EINVAL;
		goto err;
	}

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
	rdma_ah_set_make_grd(&attr, false);
	rdma_ah_set_dlid(&attr, cmd.attr.dlid);
	rdma_ah_set_sl(&attr, cmd.attr.sl);
	rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
	rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
	rdma_ah_set_port_num(&attr, cmd.attr.port_num);

	if (cmd.attr.is_global) {
		rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
				cmd.attr.grh.sgid_index,
				cmd.attr.grh.hop_limit,
				cmd.attr.grh.traffic_class);
		rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr, 0);
	}

	ah = rdma_create_user_ah(pd, &attr, &attrs->driver_udata);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->user_handle = cmd.user_handle;
	uobj->object = ah;

	resp.ah_handle = uobj->id;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	uobj_put_obj_read(pd);
	return uobj_alloc_commit(uobj);

err_copy:
	rdma_destroy_ah(ah);

err_put:
	uobj_put_obj_read(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

static int ib_uverbs_destroy_ah(struct uverbs_attr_bundle *attrs,
				const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_AH, cmd.ah_handle, attrs);
}

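/*
 * Attach a QP to a multicast group.  Each attachment is remembered on
 * the per-QP mcast_list (under mcast_lock) so a duplicate request is a
 * no-op and detach/cleanup can later undo it.
 */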
static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs,
				  const char __user *buf, int in_len,
				  int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	mutex_lock(&obj->mcast_lock);
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);

	return ret;
}

static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs,
				  const char __user *buf, int in_len,
				  int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;
	bool                          found = false;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
	mutex_lock(&obj->mcast_lock);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			found = true;
			break;
		}

	if (!found) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);
	return ret;
}

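/*
 * Allocate the bookkeeping structure recording which counters and
 * flow-action objects a flow references, so their use counts can be
 * dropped again when the flow is destroyed.  num_specs bounds how many
 * entries of each kind flow_resources_add() may later record.
 */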
struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
{
	struct ib_uflow_resources *resources;

	resources = kzalloc(sizeof(*resources), GFP_KERNEL);

	if (!resources)
		return NULL;

	if (!num_specs)
		goto out;

	resources->counters =
		kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
	resources->collection =
		kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);

	if (!resources->counters || !resources->collection)
		goto err;

out:
	resources->max = num_specs;
	return resources;

err:
	kfree(resources->counters);
	kfree(resources);

	return NULL;
}
EXPORT_SYMBOL(flow_resources_alloc);

void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
	unsigned int i;

	if (!uflow_res)
		return;

	for (i = 0; i < uflow_res->collection_num; i++)
		atomic_dec(&uflow_res->collection[i]->usecnt);

	for (i = 0; i < uflow_res->counters_num; i++)
		atomic_dec(&uflow_res->counters[i]->usecnt);

	kfree(uflow_res->collection);
	kfree(uflow_res->counters);
	kfree(uflow_res);
}
EXPORT_SYMBOL(ib_uverbs_flow_resources_free);

void flow_resources_add(struct ib_uflow_resources *uflow_res,
			enum ib_flow_spec_type type,
			void *ibobj)
{
	WARN_ON(uflow_res->num >= uflow_res->max);

	switch (type) {
	case IB_FLOW_SPEC_ACTION_HANDLE:
		atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt);
		uflow_res->collection[uflow_res->collection_num++] =
			(struct ib_flow_action *)ibobj;
		break;
	case IB_FLOW_SPEC_ACTION_COUNT:
		atomic_inc(&((struct ib_counters *)ibobj)->usecnt);
		uflow_res->counters[uflow_res->counters_num++] =
			(struct ib_counters *)ibobj;
		break;
	default:
		WARN_ON(1);
	}

	uflow_res->num++;
}
EXPORT_SYMBOL(flow_resources_add);

static int kern_spec_to_ib_spec_action(const struct uverbs_attr_bundle *attrs,
				       struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec,
				       struct ib_uflow_resources *uflow_res)
{
	ib_spec->type = kern_spec->type;
	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ACTION_TAG:
		if (kern_spec->flow_tag.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_tag))
			return -EINVAL;

		ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
		ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (kern_spec->drop.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_drop))
			return -EINVAL;

		ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
		break;
	case IB_FLOW_SPEC_ACTION_HANDLE:
		if (kern_spec->action.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_handle))
			return -EOPNOTSUPP;
		ib_spec->action.act = uobj_get_obj_read(flow_action,
							UVERBS_OBJECT_FLOW_ACTION,
							kern_spec->action.handle,
							attrs);
		if (!ib_spec->action.act)
			return -EINVAL;
		ib_spec->action.size =
			sizeof(struct ib_flow_spec_action_handle);
		flow_resources_add(uflow_res,
				   IB_FLOW_SPEC_ACTION_HANDLE,
				   ib_spec->action.act);
		uobj_put_obj_read(ib_spec->action.act);
		break;
	case IB_FLOW_SPEC_ACTION_COUNT:
		if (kern_spec->flow_count.size !=
			sizeof(struct ib_uverbs_flow_spec_action_count))
			return -EINVAL;
		ib_spec->flow_count.counters =
			uobj_get_obj_read(counters,
					  UVERBS_OBJECT_COUNTERS,
					  kern_spec->flow_count.handle,
					  attrs);
		if (!ib_spec->flow_count.counters)
			return -EINVAL;
		ib_spec->flow_count.size =
				sizeof(struct ib_flow_spec_action_count);
		flow_resources_add(uflow_res,
				   IB_FLOW_SPEC_ACTION_COUNT,
				   ib_spec->flow_count.counters);
		uobj_put_obj_read(ib_spec->flow_count.counters);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
{
	/* Returns user space filter size, includes padding */
	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}

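/*
 * Clamp a user-supplied filter to the size this kernel understands.  A
 * newer userspace may pass a larger filter, which is accepted only if
 * every byte beyond ib_real_filter_sz is zero, so unknown filter fields
 * are never silently ignored.
 */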
static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * User space filter structures must be 64 bit aligned, otherwise this
	 * may pass, but we won't handle additional new attributes.
	 */

	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv(kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}

int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
					  const void *kern_spec_mask,
					  const void *kern_spec_val,
					  size_t kern_filter_sz,
					  union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t ib_filter_sz;

	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	ib_spec->type = type;

	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
		return -EINVAL;

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
2842
			return -EINVAL;
2843 2844 2845
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
2846 2847
		break;
	case IB_FLOW_SPEC_IPV4:
2848 2849 2850 2851 2852
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
2853
			return -EINVAL;
2854 2855 2856
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
2857
		break;
2858
	case IB_FLOW_SPEC_IPV6:
2859 2860 2861 2862 2863
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
2864
			return -EINVAL;
2865 2866 2867
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
2868 2869 2870 2871

		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
2872
		break;
2873 2874
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
2875 2876 2877 2878 2879
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
2880
			return -EINVAL;
2881 2882 2883
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
2884
		break;
2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
			return -EINVAL;
		break;
2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910
	case IB_FLOW_SPEC_ESP:
		ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->esp.size = sizeof(struct ib_flow_spec_esp);
		memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
		break;
2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921
	case IB_FLOW_SPEC_GRE:
		ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->gre.size = sizeof(struct ib_flow_spec_gre);
		memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
		break;
2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932
	case IB_FLOW_SPEC_MPLS:
		ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls);
		memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz);
		break;
2933 2934 2935 2936 2937 2938
	default:
		return -EINVAL;
	}
	return 0;
}

static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ssize_t kern_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);

	kern_spec_val = (void *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = kern_spec_val + kern_filter_sz;

	return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type,
						     kern_spec_mask,
						     kern_spec_val,
						     kern_filter_sz, ib_spec);
}

static int kern_spec_to_ib_spec(struct uverbs_attr_bundle *attrs,
				struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec,
				struct ib_uflow_resources *uflow_res)
{
	if (kern_spec->reserved)
		return -EINVAL;

	if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
		return kern_spec_to_ib_spec_action(attrs, kern_spec, ib_spec,
						   uflow_res);
	else
		return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}

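/*
 * Create a work queue (WQ): resolve the PD and CQ the queue hangs off,
 * let the driver allocate it, and return the handle together with the
 * actually granted max_wr/max_sge values to userspace.
 */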
static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs,
				  struct ib_udata *ucore)
{
	struct ib_uverbs_ex_create_wq cmd;
	struct ib_uverbs_ex_create_wq_resp resp = {};
	struct ib_uwq_object           *obj;
	int err = 0;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_wq *wq;
	struct ib_wq_init_attr wq_init_attr = {};
	struct ib_device *ib_dev;

	err = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		err = -EINVAL;
		goto err_uobj;
	}

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq) {
		err = -EINVAL;
		goto err_put_pd;
	}

	wq_init_attr.cq = cq;
	wq_init_attr.max_sge = cmd.max_sge;
	wq_init_attr.max_wr = cmd.max_wr;
	wq_init_attr.wq_context = attrs->ufile;
	wq_init_attr.wq_type = cmd.wq_type;
	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
	wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	wq = pd->device->create_wq(pd, &wq_init_attr, &attrs->driver_udata);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
		goto err_put_cq;
	}

	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;
	wq->wq_type = wq_init_attr.wq_type;
	wq->cq = cq;
	wq->pd = pd;
	wq->device = pd->device;
	wq->wq_context = wq_init_attr.wq_context;
	atomic_set(&wq->usecnt, 0);
	atomic_inc(&pd->usecnt);
	atomic_inc(&cq->usecnt);

	memset(&resp, 0, sizeof(resp));
	resp.wq_handle = obj->uevent.uobject.id;
	resp.max_sge = wq_init_attr.max_sge;
	resp.max_wr = wq_init_attr.max_wr;
	resp.wqn = wq->wq_num;
	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
	err = uverbs_response(attrs, &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(pd);
	uobj_put_obj_read(cq);
	return uobj_alloc_commit(&obj->uevent.uobject);

err_copy:
	ib_destroy_wq(wq);
err_put_cq:
	uobj_put_obj_read(cq);
err_put_pd:
	uobj_put_obj_read(pd);
err_uobj:
	uobj_alloc_abort(&obj->uevent.uobject);

	return err;
}

static int ib_uverbs_ex_destroy_wq(struct uverbs_attr_bundle *attrs,
				   struct ib_udata *ucore)
{
	struct ib_uverbs_ex_destroy_wq	cmd;
	struct ib_uverbs_ex_destroy_wq_resp	resp = {};
	struct ib_uobject		*uobj;
	struct ib_uwq_object		*obj;
	int				ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
	uobj = uobj_get_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	resp.events_reported = obj->uevent.events_reported;

	uobj_put_destroy(uobj);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs,
				  struct ib_udata *ucore)
{
	struct ib_uverbs_ex_modify_wq cmd;
	struct ib_wq *wq;
	struct ib_wq_attr wq_attr = {};
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (!cmd.attr_mask)
		return -EINVAL;

	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
		return -EINVAL;

	wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
	if (!wq)
		return -EINVAL;

	wq_attr.curr_wq_state = cmd.curr_wq_state;
	wq_attr.wq_state = cmd.wq_state;
	if (cmd.attr_mask & IB_WQ_FLAGS) {
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask,
				    &attrs->driver_udata);
	uobj_put_obj_read(wq);
	return ret;
}

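/*
 * Build an RSS receive work queue indirection table: read the
 * 1 << log_ind_tbl_size WQ handles that follow the base command, take
 * a reference on each WQ, and hand the array to the driver.
 */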
static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs,
					     struct ib_udata *ucore)
{
	struct ib_uverbs_ex_create_rwq_ind_table cmd;
	struct ib_uverbs_ex_create_rwq_ind_table_resp  resp = {};
	struct ib_uobject		  *uobj;
	int err;
	struct ib_rwq_ind_table_init_attr init_attr = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_wq	**wqs = NULL;
	u32 *wqs_handles = NULL;
	struct ib_wq	*wq = NULL;
	int i, j, num_read_wqs;
	u32 num_wq_handles;
	struct uverbs_req_iter iter;
	struct ib_device *ib_dev;

	err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;

	err = uverbs_request_next(&iter, wqs_handles,
				  num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	err = uverbs_request_finish(&iter);
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto  err_free;
	}

	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
			num_read_wqs++) {
		wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
				       wqs_handles[num_read_wqs], attrs);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, attrs, &ib_dev);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto put_wqs;
	}

	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;

	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr,
						   &attrs->driver_udata);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = uverbs_response_length(attrs, sizeof(resp));

	err = uverbs_response(attrs, &resp, sizeof(resp));
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);

	return uobj_alloc_commit(uobj);

err_copy:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	uobj_alloc_abort(uobj);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}

static int ib_uverbs_ex_destroy_rwq_ind_table(struct uverbs_attr_bundle *attrs,
					      struct ib_udata *ucore)
{
	struct ib_uverbs_ex_destroy_rwq_ind_table cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	return uobj_perform_destroy(UVERBS_OBJECT_RWQ_IND_TBL,
				    cmd.ind_tbl_handle, attrs);
}

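/*
 * Create a flow steering rule.  The base command is followed by
 * num_of_specs variable-sized flow specs totalling flow_attr.size
 * bytes; each is converted via kern_spec_to_ib_spec(), and the parsing
 * loop insists that the specs consume the buffer exactly.
 */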
static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs,
				    struct ib_udata *ucore)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		  *uobj;
	struct ib_flow			  *flow_id;
	struct ib_uverbs_flow_attr	  *kern_flow_attr;
	struct ib_flow_attr		  *flow_attr;
	struct ib_qp			  *qp;
	struct ib_uflow_resources	  *uflow_res;
	struct ib_uverbs_flow_spec_hdr	  *kern_spec;
	struct uverbs_req_iter iter;
	int err;
	void *ib_spec;
	int i;
	struct ib_device *ib_dev;

	err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		*kern_flow_attr = cmd.flow_attr;
		err = uverbs_request_next(&iter, &kern_flow_attr->flow_specs,
					  cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	err = uverbs_request_finish(&iter);
	if (err)
		goto err_free_attr;

	uobj = uobj_alloc(UVERBS_OBJECT_FLOW, attrs, &ib_dev);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto err_free_attr;
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
		err = -EINVAL;
		goto err_put;
	}

	flow_attr = kzalloc(struct_size(flow_attr, flows,
				cmd.flow_attr.num_of_specs), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}
	uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs);
	if (!uflow_res) {
		err = -ENOMEM;
		goto err_free_flow_attr;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr->flow_specs;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
			cmd.flow_attr.size >= sizeof(*kern_spec) &&
			cmd.flow_attr.size >= kern_spec->size;
	     i++) {
		err = kern_spec_to_ib_spec(
				attrs, (struct ib_uverbs_flow_spec *)kern_spec,
				ib_spec, uflow_res);
		if (err)
			goto err_free;

		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= kern_spec->size;
		kern_spec = ((void *)kern_spec) + kern_spec->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}

	flow_id = qp->device->create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER,
					  &attrs->driver_udata);

	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}

	ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = uverbs_response(attrs, &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(qp);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return uobj_alloc_commit(uobj);
err_copy:
	if (!qp->device->destroy_flow(flow_id))
		atomic_dec(&qp->usecnt);
err_free:
	ib_uverbs_flow_resources_free(uflow_res);
err_free_flow_attr:
	kfree(flow_attr);
err_put:
	uobj_put_obj_read(qp);
err_uobj:
	uobj_alloc_abort(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}

static int ib_uverbs_ex_destroy_flow(struct uverbs_attr_bundle *attrs,
				     struct ib_udata *ucore)
{
	struct ib_uverbs_destroy_flow	cmd;
	int				ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	return uobj_perform_destroy(UVERBS_OBJECT_FLOW, cmd.flow_handle, attrs);
}

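/*
 * Shared implementation for create_srq and create_xsrq.  Depending on
 * srq_type it resolves the XRCD and/or completion queue first, fills in
 * an ib_srq_init_attr, and commits the uobject only once the response
 * has been copied out to userspace.
 */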
static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;
	struct ib_device *ib_dev;

	obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, attrs,
						  &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (cmd->srq_type == IB_SRQT_TM)
		attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;

	if (cmd->srq_type == IB_SRQT_XRC) {
		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
					  attrs);
		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err;
		}

		attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);
	}

	if (ib_srq_has_cq(cmd->srq_type)) {
		attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
						cmd->cq_handle, attrs);
		if (!attr.ext.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = attrs->ufile;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (ib_srq_has_cq(cmd->srq_type)) {
		srq->ext.cq       = attr.ext.cq;
		atomic_inc(&attr.ext.cq->usecnt);
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	obj->uevent.uobject.user_handle = cmd->user_handle;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err_copy;

	if (cmd->srq_type == IB_SRQT_XRC)
		uobj_put_read(xrcd_uobj);

	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

	uobj_put_obj_read(pd);
	return uobj_alloc_commit(&obj->uevent.uobject);

err_copy:
	ib_destroy_srq(srq);

err_put:
	uobj_put_obj_read(pd);

err_put_cq:
	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

err:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

static int ib_uverbs_create_srq(struct uverbs_attr_bundle *attrs,
				const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	memset(&xcmd, 0, sizeof(xcmd));
	xcmd.response	 = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type	 = IB_SRQT_BASIC;
	xcmd.pd_handle	 = cmd.pd_handle;
	xcmd.max_wr	 = cmd.max_wr;
	xcmd.max_sge	 = cmd.max_sge;
	xcmd.srq_limit	 = cmd.srq_limit;

	return __uverbs_create_xsrq(attrs, &xcmd, &attrs->driver_udata);
}

static int ib_uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
				 const char __user *buf, int in_len,
				 int out_len)
{
	struct ib_uverbs_create_xsrq     cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return __uverbs_create_xsrq(attrs, &cmd, &attrs->driver_udata);
}

static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs,
				const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask,
				      &attrs->driver_udata);

	uobj_put_obj_read(srq);

	return ret;
}

static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs,
			       const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                             ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	uobj_put_obj_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_destroy_srq(struct uverbs_attr_bundle *attrs,
				 const char __user *buf, int in_len,
				 int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_uevent_object        	 *obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_get_destroy(UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uevent_object, uobject);
	memset(&resp, 0, sizeof(resp));
	resp.events_reported = obj->events_reported;

	uobj_put_destroy(uobj);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs,
				     struct ib_udata *ucore)
{
	struct ib_uverbs_ex_query_device_resp resp = {};
	struct ib_uverbs_ex_query_device  cmd;
	struct ib_device_attr attr = {0};
	struct ib_ucontext *ucontext;
	struct ib_device *ib_dev;
	int err;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);
	ib_dev = ucontext->device;

	err = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	err = ib_dev->query_device(ib_dev, &attr, &attrs->driver_udata);
	if (err)
		return err;

	copy_query_dev_fields(ucontext, &resp.base, &attr);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif

	resp.timestamp_mask = attr.timestamp_mask;
	resp.hca_core_clock = attr.hca_core_clock;
	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;
	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.tm_caps.max_rndv_hdr_size	= attr.tm_caps.max_rndv_hdr_size;
	resp.tm_caps.max_num_tags	= attr.tm_caps.max_num_tags;
	resp.tm_caps.max_ops		= attr.tm_caps.max_ops;
	resp.tm_caps.max_sge		= attr.tm_caps.max_sge;
	resp.tm_caps.flags		= attr.tm_caps.flags;
	resp.cq_moderation_caps.max_cq_moderation_count  =
		attr.cq_caps.max_cq_moderation_count;
	resp.cq_moderation_caps.max_cq_moderation_period =
		attr.cq_caps.max_cq_moderation_period;
	resp.max_dm_size = attr.max_dm_size;
	resp.response_length = uverbs_response_length(attrs, sizeof(resp));

	return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_modify_cq cmd;
	struct ib_cq *cq;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (!cmd.attr_mask || cmd.reserved)
		return -EINVAL;

	if (cmd.attr_mask > IB_CQ_MODERATE)
		return -EOPNOTSUPP;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq)
		return -EINVAL;

	ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);

	uobj_put_obj_read(cq);

	return ret;
}

/*
 * Describe the input structs for write(). Some write methods have an input
 * only struct, most have an input and output. If the struct has an output then
 * the 'response' u64 must be the first field in the request structure.
 *
 * If udata is present then both the request and response structs have a
 * trailing driver_data flex array. In this case the size of the base struct
 * cannot be changed.
 */
#define offsetof_after(_struct, _member)                                       \
	(offsetof(_struct, _member) + sizeof(((_struct *)NULL)->_member))

#define UAPI_DEF_WRITE_IO(req, resp)                                           \
	.write.has_resp = 1 +                                                  \
			  BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) +    \
			  BUILD_BUG_ON_ZERO(sizeof(((req *)0)->response) !=    \
					    sizeof(u64)),                      \
	.write.req_size = sizeof(req), .write.resp_size = sizeof(resp)

#define UAPI_DEF_WRITE_I(req) .write.req_size = sizeof(req)

#define UAPI_DEF_WRITE_UDATA_IO(req, resp)                                     \
	UAPI_DEF_WRITE_IO(req, resp),                                          \
		.write.has_udata =                                             \
			1 +                                                    \
			BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=        \
					  sizeof(req)) +                       \
			BUILD_BUG_ON_ZERO(offsetof(resp, driver_data) !=       \
					  sizeof(resp))

#define UAPI_DEF_WRITE_UDATA_I(req)                                            \
	UAPI_DEF_WRITE_I(req),                                                 \
		.write.has_udata =                                             \
			1 + BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=    \
					      sizeof(req))
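
/*
 * Illustrative sketch only (hypothetical structs, not part of the uAPI):
 * a command that satisfies the layout rules above. The 'response' pointer
 * is the first u64 of the request, and both structs end in a driver_data
 * flex array, so UAPI_DEF_WRITE_UDATA_IO() can check the layout at build
 * time.
 */
#if 0
struct ib_uverbs_example_cmd {
	__u64 response;		/* must be first when there is output */
	__u32 handle;
	__u32 reserved;
	__u64 driver_data[];	/* trailing flex array when udata is used */
};

struct ib_uverbs_example_resp {
	__u32 result;
	__u32 reserved;
	__u64 driver_data[];
};
#endif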

/*
 * The _EX versions are for use with WRITE_EX and allow the last struct member
 * to be specified. Buffers that do not include that member will be rejected.
 */
#define UAPI_DEF_WRITE_IO_EX(req, req_last_member, resp, resp_last_member)     \
	.write.has_resp = 1,                                                   \
	.write.req_size = offsetof_after(req, req_last_member),                \
	.write.resp_size = offsetof_after(resp, resp_last_member)

#define UAPI_DEF_WRITE_I_EX(req, req_last_member)                              \
	.write.req_size = offsetof_after(req, req_last_member)
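
/*
 * For example, the UAPI_DEF_WRITE_I_EX(struct ib_uverbs_ex_modify_wq,
 * curr_wq_state) entry below sets write.req_size to
 * offsetof(struct ib_uverbs_ex_modify_wq, curr_wq_state) plus
 * sizeof(curr_wq_state), so a write() buffer that stops short of
 * curr_wq_state is rejected.
 */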

const struct uapi_definition uverbs_def_write_intf[] = {
	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_AH,
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_AH,
				     ib_uverbs_create_ah,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_create_ah,
					     struct ib_uverbs_create_ah_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(create_ah)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DESTROY_AH,
			ib_uverbs_destroy_ah,
			UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_ah),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_ah))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_COMP_CHANNEL,
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL,
			ib_uverbs_create_comp_channel,
			UAPI_DEF_WRITE_IO(
				struct ib_uverbs_create_comp_channel,
				struct ib_uverbs_create_comp_channel_resp))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_CQ,
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_CQ,
				     ib_uverbs_create_cq,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_create_cq,
					     struct ib_uverbs_create_cq_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DESTROY_CQ,
			ib_uverbs_destroy_cq,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_cq,
					  struct ib_uverbs_destroy_cq_resp),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_cq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_POLL_CQ,
			ib_uverbs_poll_cq,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_poll_cq,
					  struct ib_uverbs_poll_cq_resp),
			UAPI_DEF_METHOD_NEEDS_FN(poll_cq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_REQ_NOTIFY_CQ,
			ib_uverbs_req_notify_cq,
			UAPI_DEF_WRITE_I(struct ib_uverbs_req_notify_cq),
			UAPI_DEF_METHOD_NEEDS_FN(req_notify_cq)),
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_RESIZE_CQ,
				     ib_uverbs_resize_cq,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_resize_cq,
					     struct ib_uverbs_resize_cq_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(resize_cq)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_CREATE_CQ,
			ib_uverbs_ex_create_cq,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_cq,
					     reserved,
					     struct ib_uverbs_ex_create_cq_resp,
					     response_length),
			UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_MODIFY_CQ,
			ib_uverbs_ex_modify_cq,
			UAPI_DEF_WRITE_I(struct ib_uverbs_ex_modify_cq),
			UAPI_DEF_METHOD_NEEDS_FN(modify_cq))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_DEVICE,
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_GET_CONTEXT,
				     ib_uverbs_get_context,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_get_context,
					     struct ib_uverbs_get_context_resp)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_QUERY_DEVICE,
			ib_uverbs_query_device,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_device,
					  struct ib_uverbs_query_device_resp)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_QUERY_PORT,
			ib_uverbs_query_port,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_port,
					  struct ib_uverbs_query_port_resp),
			UAPI_DEF_METHOD_NEEDS_FN(query_port)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_QUERY_DEVICE,
			ib_uverbs_ex_query_device,
			UAPI_DEF_WRITE_IO_EX(
				struct ib_uverbs_ex_query_device,
				reserved,
				struct ib_uverbs_ex_query_device_resp,
				response_length),
			UAPI_DEF_METHOD_NEEDS_FN(query_device)),
		UAPI_DEF_OBJ_NEEDS_FN(alloc_ucontext),
		UAPI_DEF_OBJ_NEEDS_FN(dealloc_ucontext)),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_FLOW,
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_CREATE_FLOW,
			ib_uverbs_ex_create_flow,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_create_flow,
					     flow_attr,
					     struct ib_uverbs_create_flow_resp,
					     flow_handle),
			UAPI_DEF_METHOD_NEEDS_FN(create_flow)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
			ib_uverbs_ex_destroy_flow,
			UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_flow),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_flow))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_MR,
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_DEREG_MR,
				     ib_uverbs_dereg_mr,
				     UAPI_DEF_WRITE_I(struct ib_uverbs_dereg_mr),
				     UAPI_DEF_METHOD_NEEDS_FN(dereg_mr)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_REG_MR,
			ib_uverbs_reg_mr,
			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_reg_mr,
						struct ib_uverbs_reg_mr_resp),
			UAPI_DEF_METHOD_NEEDS_FN(reg_user_mr)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_REREG_MR,
			ib_uverbs_rereg_mr,
			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_rereg_mr,
						struct ib_uverbs_rereg_mr_resp),
			UAPI_DEF_METHOD_NEEDS_FN(rereg_user_mr))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_MW,
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_ALLOC_MW,
			ib_uverbs_alloc_mw,
			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_mw,
						struct ib_uverbs_alloc_mw_resp),
			UAPI_DEF_METHOD_NEEDS_FN(alloc_mw)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DEALLOC_MW,
			ib_uverbs_dealloc_mw,
			UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_mw),
			UAPI_DEF_METHOD_NEEDS_FN(dealloc_mw))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_PD,
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_ALLOC_PD,
			ib_uverbs_alloc_pd,
			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_pd,
						struct ib_uverbs_alloc_pd_resp),
			UAPI_DEF_METHOD_NEEDS_FN(alloc_pd)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DEALLOC_PD,
			ib_uverbs_dealloc_pd,
			UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_pd),
			UAPI_DEF_METHOD_NEEDS_FN(dealloc_pd))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_QP,
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_ATTACH_MCAST,
			ib_uverbs_attach_mcast,
			UAPI_DEF_WRITE_I(struct ib_uverbs_attach_mcast),
			UAPI_DEF_METHOD_NEEDS_FN(attach_mcast),
			UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_QP,
				     ib_uverbs_create_qp,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_create_qp,
					     struct ib_uverbs_create_qp_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DESTROY_QP,
			ib_uverbs_destroy_qp,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_qp,
					  struct ib_uverbs_destroy_qp_resp),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_qp)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DETACH_MCAST,
			ib_uverbs_detach_mcast,
			UAPI_DEF_WRITE_I(struct ib_uverbs_detach_mcast),
			UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_MODIFY_QP,
			ib_uverbs_modify_qp,
			UAPI_DEF_WRITE_I(struct ib_uverbs_modify_qp),
			UAPI_DEF_METHOD_NEEDS_FN(modify_qp)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_POST_RECV,
			ib_uverbs_post_recv,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_post_recv,
					  struct ib_uverbs_post_recv_resp),
			UAPI_DEF_METHOD_NEEDS_FN(post_recv)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_POST_SEND,
			ib_uverbs_post_send,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_post_send,
					  struct ib_uverbs_post_send_resp),
			UAPI_DEF_METHOD_NEEDS_FN(post_send)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_QUERY_QP,
			ib_uverbs_query_qp,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_qp,
					  struct ib_uverbs_query_qp_resp),
			UAPI_DEF_METHOD_NEEDS_FN(query_qp)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_CREATE_QP,
			ib_uverbs_ex_create_qp,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_qp,
					     comp_mask,
					     struct ib_uverbs_ex_create_qp_resp,
					     response_length),
			UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_MODIFY_QP,
			ib_uverbs_ex_modify_qp,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_modify_qp,
					     base,
					     struct ib_uverbs_ex_modify_qp_resp,
					     response_length),
			UAPI_DEF_METHOD_NEEDS_FN(modify_qp))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_RWQ_IND_TBL,
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL,
			ib_uverbs_ex_create_rwq_ind_table,
			UAPI_DEF_WRITE_IO_EX(
				struct ib_uverbs_ex_create_rwq_ind_table,
				log_ind_tbl_size,
				struct ib_uverbs_ex_create_rwq_ind_table_resp,
				ind_tbl_num),
			UAPI_DEF_METHOD_NEEDS_FN(create_rwq_ind_table)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
			ib_uverbs_ex_destroy_rwq_ind_table,
			UAPI_DEF_WRITE_I(
				struct ib_uverbs_ex_destroy_rwq_ind_table),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_rwq_ind_table))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_WQ,
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_CREATE_WQ,
			ib_uverbs_ex_create_wq,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_wq,
					     max_sge,
					     struct ib_uverbs_ex_create_wq_resp,
					     wqn),
			UAPI_DEF_METHOD_NEEDS_FN(create_wq)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_DESTROY_WQ,
			ib_uverbs_ex_destroy_wq,
			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_destroy_wq,
					     wq_handle,
					     struct ib_uverbs_ex_destroy_wq_resp,
					     reserved),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_wq)),
		DECLARE_UVERBS_WRITE_EX(
			IB_USER_VERBS_EX_CMD_MODIFY_WQ,
			ib_uverbs_ex_modify_wq,
			UAPI_DEF_WRITE_I_EX(struct ib_uverbs_ex_modify_wq,
					    curr_wq_state),
			UAPI_DEF_METHOD_NEEDS_FN(modify_wq))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_SRQ,
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_SRQ,
				     ib_uverbs_create_srq,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_create_srq,
					     struct ib_uverbs_create_srq_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_XSRQ,
				     ib_uverbs_create_xsrq,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_create_xsrq,
					     struct ib_uverbs_create_srq_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_DESTROY_SRQ,
			ib_uverbs_destroy_srq,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_srq,
					  struct ib_uverbs_destroy_srq_resp),
			UAPI_DEF_METHOD_NEEDS_FN(destroy_srq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_MODIFY_SRQ,
			ib_uverbs_modify_srq,
			UAPI_DEF_WRITE_UDATA_I(struct ib_uverbs_modify_srq),
			UAPI_DEF_METHOD_NEEDS_FN(modify_srq)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_POST_SRQ_RECV,
			ib_uverbs_post_srq_recv,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_post_srq_recv,
					  struct ib_uverbs_post_srq_recv_resp),
			UAPI_DEF_METHOD_NEEDS_FN(post_srq_recv)),
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_QUERY_SRQ,
			ib_uverbs_query_srq,
			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_srq,
					  struct ib_uverbs_query_srq_resp),
			UAPI_DEF_METHOD_NEEDS_FN(query_srq))),

	DECLARE_UVERBS_OBJECT(
		UVERBS_OBJECT_XRCD,
		DECLARE_UVERBS_WRITE(
			IB_USER_VERBS_CMD_CLOSE_XRCD,
			ib_uverbs_close_xrcd,
			UAPI_DEF_WRITE_I(struct ib_uverbs_close_xrcd),
			UAPI_DEF_METHOD_NEEDS_FN(dealloc_xrcd)),
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_QP,
				     ib_uverbs_open_qp,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_open_qp,
					     struct ib_uverbs_create_qp_resp)),
		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_XRCD,
				     ib_uverbs_open_xrcd,
				     UAPI_DEF_WRITE_UDATA_IO(
					     struct ib_uverbs_open_xrcd,
					     struct ib_uverbs_open_xrcd_resp),
				     UAPI_DEF_METHOD_NEEDS_FN(alloc_xrcd))),

	{},
};