/*
 * Copyright (c) 2017, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/rdma_user_ioctl.h>
#include <rdma/uverbs_ioctl.h>
#include "rdma_core.h"
#include "uverbs.h"

/*
 * Chain link for memory obtained with kvmalloc() when an allocation does
 * not fit in the bundle's internal buffer; the whole chain is freed in
 * bulk by bundle_destroy().
 */
struct bundle_alloc_head {
	struct bundle_alloc_head *next;
	u8 data[];	/* flexible payload handed back to the caller */
};

/*
 * Per-ioctl working state. Lives on the stack when the computed size fits
 * in onstack_pbundle, otherwise it is kmalloc'ed (see ib_uverbs_cmd_verbs).
 */
struct bundle_priv {
	/* Must be first */
	struct bundle_alloc_head alloc_head;
	struct bundle_alloc_head *allocated_mem; /* kvmalloc spill chain */
	size_t internal_avail;	/* bytes usable in internal_buffer */
	size_t internal_used;	/* bytes already carved out, kept aligned */

	struct ib_uverbs_attr __user *user_attrs; /* userspace attr array */
	struct ib_uverbs_attr *uattrs;		  /* kernel copy of the attrs */
	struct uverbs_obj_attr *destroy_attr;	  /* DESTROY-access attr, if any */

	/*
	 * Must be last. bundle ends in a flex array which overlaps
	 * internal_buffer.
	 */
	struct uverbs_attr_bundle bundle;
	u64 internal_buffer[32];
};

62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105
/**
 * uverbs_alloc() - Quickly allocate memory for use with a bundle
 * @bundle: The bundle
 * @size: Number of bytes to allocate
 * @flags: Allocator flags
 *
 * The bundle allocator is intended for allocations that are connected with
 * processing the system call related to the bundle. The allocated memory is
 * always freed once the system call completes, and cannot be freed any other
 * way.
 *
 * This tries to use a small pool of pre-allocated memory for performance.
 */
__malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size,
			     gfp_t flags)
{
	struct bundle_priv *pbundle =
		container_of(bundle, struct bundle_priv, bundle);
	size_t new_used;
	void *res;

	if (check_add_overflow(size, pbundle->internal_used, &new_used))
		return ERR_PTR(-EINVAL);

	if (new_used > pbundle->internal_avail) {
		struct bundle_alloc_head *buf;

		buf = kvmalloc(struct_size(buf, data, size), flags);
		if (!buf)
			return ERR_PTR(-ENOMEM);
		buf->next = pbundle->allocated_mem;
		pbundle->allocated_mem = buf;
		return buf->data;
	}

	res = (void *)pbundle->internal_buffer + pbundle->internal_used;
	pbundle->internal_used =
		ALIGN(new_used, sizeof(*pbundle->internal_buffer));
	if (flags & __GFP_ZERO)
		memset(res, 0, size);
	return res;
}
EXPORT_SYMBOL(_uverbs_alloc);

106 107 108 109 110 111 112 113 114 115 116
static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
				   u16 len)
{
	if (uattr->len > sizeof(((struct ib_uverbs_attr *)0)->data))
		return ib_is_buffer_cleared(u64_to_user_ptr(uattr->data) + len,
					    uattr->len - len);

	return !memchr_inv((const void *)&uattr->data + len,
			   0, uattr->len - len);
}

/*
 * Validate one userspace-supplied attribute against its spec and record it
 * in the bundle's attribute array.
 *
 * @pbundle: state of the ioctl currently being processed
 * @uattr: kernel copy of the userspace attribute
 * @attr_id: attr id already translated into its namespace bucket
 * @attr_spec_bucket: the spec bucket @attr_id indexes into
 * @attr_bundle_h: destination hash (attr storage + valid bitmap)
 * @uattr_ptr: userspace location of this attr, used to write back newly
 *             allocated object ids
 *
 * On success the attr's bit is set in the valid bitmap so that
 * uverbs_finalize_attrs() knows it must be finalized. Returns 0 or a
 * negative errno.
 */
static int uverbs_process_attr(struct bundle_priv *pbundle,
			       const struct ib_uverbs_attr *uattr,
			       u16 attr_id,
			       const struct uverbs_attr_spec_hash *attr_spec_bucket,
			       struct uverbs_attr_bundle_hash *attr_bundle_h,
			       struct ib_uverbs_attr __user *uattr_ptr)
{
	const struct uverbs_attr_spec *spec;
	const struct uverbs_attr_spec *val_spec;
	struct uverbs_attr *e;
	struct uverbs_obj_attr *o_attr;
	struct uverbs_attr *elements = attr_bundle_h->attrs;

	/* Unknown attrs are only an error if userspace marked them mandatory */
	if (attr_id >= attr_spec_bucket->num_attrs) {
		if (uattr->flags & UVERBS_ATTR_F_MANDATORY)
			return -EINVAL;
		else
			return 0;
	}

	/* Reject duplicate attributes in one request */
	if (test_bit(attr_id, attr_bundle_h->valid_bitmap))
		return -EINVAL;

	spec = &attr_spec_bucket->attrs[attr_id];
	val_spec = spec;
	e = &elements[attr_id];

	switch (spec->type) {
	case UVERBS_ATTR_TYPE_ENUM_IN:
		if (uattr->attr_data.enum_data.elem_id >= spec->u.enum_def.num_elems)
			return -EOPNOTSUPP;

		if (uattr->attr_data.enum_data.reserved)
			return -EINVAL;

		/* The payload is validated against the selected member spec */
		val_spec = &spec->u2.enum_def.ids[uattr->attr_data.enum_data.elem_id];

		/* Currently we only support PTR_IN based enums */
		if (val_spec->type != UVERBS_ATTR_TYPE_PTR_IN)
			return -EOPNOTSUPP;

		e->ptr_attr.enum_id = uattr->attr_data.enum_data.elem_id;
	/* fall through */
	case UVERBS_ATTR_TYPE_PTR_IN:
		/* Ensure that any data provided by userspace beyond the known
		 * struct is zero. Userspace that knows how to use some future
		 * longer struct will fail here if used with an old kernel and
		 * non-zero content, making ABI compat/discovery simpler.
		 */
		if (uattr->len > val_spec->u.ptr.len &&
		    val_spec->zero_trailing &&
		    !uverbs_is_attr_cleared(uattr, val_spec->u.ptr.len))
			return -EOPNOTSUPP;

	/* fall through */
	case UVERBS_ATTR_TYPE_PTR_OUT:
		if (uattr->len < val_spec->u.ptr.min_len ||
		    (!val_spec->zero_trailing &&
		     uattr->len > val_spec->u.ptr.len))
			return -EINVAL;

		/* ENUM_IN overlays enum_data on the reserved field */
		if (spec->type != UVERBS_ATTR_TYPE_ENUM_IN &&
		    uattr->attr_data.reserved)
			return -EINVAL;

		e->ptr_attr.uattr_idx = uattr - pbundle->uattrs;
		e->ptr_attr.len = uattr->len;

		if (val_spec->alloc_and_copy && !uverbs_attr_ptr_is_inline(e)) {
			void *p;

			/* Freed with the bundle, not by the handler */
			p = uverbs_alloc(&pbundle->bundle, uattr->len);
			if (IS_ERR(p))
				return PTR_ERR(p);

			e->ptr_attr.ptr = p;

			if (copy_from_user(p, u64_to_user_ptr(uattr->data),
					   uattr->len))
				return -EFAULT;
		} else {
			e->ptr_attr.data = uattr->data;
		}
		break;

	case UVERBS_ATTR_TYPE_IDR:
	case UVERBS_ATTR_TYPE_FD:
		if (uattr->attr_data.reserved)
			return -EINVAL;

		if (uattr->len != 0)
			return -EINVAL;

		o_attr = &e->obj_attr;

		/* specs are allowed to have only one destroy attribute */
		WARN_ON(spec->u.obj.access == UVERBS_ACCESS_DESTROY &&
			pbundle->destroy_attr);
		if (spec->u.obj.access == UVERBS_ACCESS_DESTROY)
			pbundle->destroy_attr = o_attr;

		/*
		 * The type of uattr->data is u64 for UVERBS_ATTR_TYPE_IDR and
		 * s64 for UVERBS_ATTR_TYPE_FD. We can cast the u64 to s64
		 * here without caring about truncation as we know that the
		 * IDR implementation today rejects negative IDs
		 */
		o_attr->uobject = uverbs_get_uobject_from_file(
					spec->u.obj.obj_type,
					pbundle->bundle.ufile,
					spec->u.obj.access,
					uattr->data_s64);

		if (IS_ERR(o_attr->uobject))
			return PTR_ERR(o_attr->uobject);

		if (spec->u.obj.access == UVERBS_ACCESS_NEW) {
			s64 id = o_attr->uobject->id;

			/* Copy the allocated id to the user-space */
			if (put_user(id, &uattr_ptr->data)) {
				/* Abort the object we just created */
				uverbs_finalize_object(o_attr->uobject,
						       UVERBS_ACCESS_NEW,
						       false);
				return -EFAULT;
			}
		}

		break;
	default:
		return -EOPNOTSUPP;
	}

	set_bit(attr_id, attr_bundle_h->valid_bitmap);
	return 0;
}

/*
 * Finalize (commit or abort, per @commit) every IDR/FD object attribute
 * that was successfully processed, across the first @num buckets.
 *
 * All valid attrs are finalized even if one of them fails; the first
 * error encountered is remembered and returned.
 */
static int uverbs_finalize_attrs(struct bundle_priv *pbundle,
				 struct uverbs_attr_spec_hash *const *spec_hash,
				 size_t num, bool commit)
{
	struct uverbs_attr_bundle *attrs_bundle = &pbundle->bundle;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < num; i++) {
		struct uverbs_attr_bundle_hash *curr_bundle =
			&attrs_bundle->hash[i];
		const struct uverbs_attr_spec_hash *curr_spec_bucket =
			spec_hash[i];
		unsigned int j;

		/* Sparse spec arrays may contain empty buckets */
		if (!curr_spec_bucket)
			continue;

		for (j = 0; j < curr_bundle->num_attrs; j++) {
			struct uverbs_attr *attr;
			const struct uverbs_attr_spec *spec;

			/* Only attrs userspace actually supplied */
			if (!uverbs_attr_is_valid_in_hash(curr_bundle, j))
				continue;

			attr = &curr_bundle->attrs[j];
			spec = &curr_spec_bucket->attrs[j];

			if (spec->type == UVERBS_ATTR_TYPE_IDR ||
			    spec->type == UVERBS_ATTR_TYPE_FD) {
				int current_ret;

				current_ret = uverbs_finalize_object(
					attr->obj_attr.uobject,
					spec->u.obj.access, commit);
				/* keep the first error, but finalize the rest */
				if (!ret)
					ret = current_ret;
			}
		}
	}
	return ret;
}

/*
 * Walk every attribute userspace supplied, translate its id into a
 * namespace bucket and dispatch it to uverbs_process_attr().
 *
 * Returns the number of buckets actually used on success, or a negative
 * errno; on failure all attrs processed so far are finalized (aborted)
 * before returning.
 */
static int uverbs_uattrs_process(size_t num_uattrs,
				 const struct uverbs_method_spec *method,
				 struct bundle_priv *pbundle)
{
	struct uverbs_attr_bundle *attr_bundle = &pbundle->bundle;
	struct ib_uverbs_attr __user *uattr_ptr = pbundle->user_attrs;
	size_t i;
	int ret = 0;
	int num_given_buckets = 0;

	for (i = 0; i < num_uattrs; i++) {
		const struct ib_uverbs_attr *uattr = &pbundle->uattrs[i];
		u16 attr_id = uattr->attr_id;
		struct uverbs_attr_spec_hash *attr_spec_bucket;

		ret = uverbs_ns_idx(&attr_id, method->num_buckets);
		if (ret < 0 || !method->attr_buckets[ret]) {
			/*
			 * Unknown namespace: fatal only if userspace marked
			 * the attr mandatory.
			 *
			 * NOTE(review): if ret >= 0 but the bucket is NULL,
			 * a mandatory attr returns this non-negative ret,
			 * which the caller interprets as a bucket count —
			 * verify this path cannot be reached with a valid
			 * (positive) namespace index.
			 */
			if (uattr->flags & UVERBS_ATTR_F_MANDATORY) {
				uverbs_finalize_attrs(pbundle,
						      method->attr_buckets,
						      num_given_buckets,
						      false);
				return ret;
			}
			continue;
		}

		/*
		 * ret is the found ns, so increase num_given_buckets if
		 * necessary.
		 */
		if (ret >= num_given_buckets)
			num_given_buckets = ret + 1;

		attr_spec_bucket = method->attr_buckets[ret];
		ret = uverbs_process_attr(pbundle,
					  uattr, attr_id,
					  attr_spec_bucket,
					  &attr_bundle->hash[ret],
					  uattr_ptr++);
		if (ret) {
			/* Abort everything processed so far */
			uverbs_finalize_attrs(pbundle,
					      method->attr_buckets,
					      num_given_buckets,
					      false);
			return ret;
		}
	}

	return num_given_buckets;
}

static int uverbs_validate_kernel_mandatory(const struct uverbs_method_spec *method_spec,
350
					    struct bundle_priv *pbundle)
M
Matan Barak 已提交
351
{
352
	struct uverbs_attr_bundle *attr_bundle = &pbundle->bundle;
M
Matan Barak 已提交
353 354 355 356 357 358
	unsigned int i;

	for (i = 0; i < attr_bundle->num_buckets; i++) {
		struct uverbs_attr_spec_hash *attr_spec_bucket =
			method_spec->attr_buckets[i];

359 360 361
		if (!attr_spec_bucket)
			continue;

M
Matan Barak 已提交
362 363 364 365 366 367
		if (!bitmap_subset(attr_spec_bucket->mandatory_attrs_bitmask,
				   attr_bundle->hash[i].valid_bitmap,
				   attr_spec_bucket->num_attrs))
			return -EINVAL;
	}

368 369 370 371 372 373 374 375 376
	for (; i < method_spec->num_buckets; i++) {
		struct uverbs_attr_spec_hash *attr_spec_bucket =
			method_spec->attr_buckets[i];

		if (!bitmap_empty(attr_spec_bucket->mandatory_attrs_bitmask,
				  attr_spec_bucket->num_attrs))
			return -EINVAL;
	}

M
Matan Barak 已提交
377 378 379
	return 0;
}

/*
 * Execute one method: parse the user attrs, enforce kernel-mandatory
 * attrs, run the handler, and finalize all object attrs.
 *
 * Returns 0 on success; on error the handler's error takes precedence
 * over any finalization error.
 */
static int uverbs_handle_method(size_t num_uattrs,
				const struct uverbs_method_spec *method_spec,
				struct bundle_priv *pbundle)
{
	struct uverbs_attr_bundle *attr_bundle = &pbundle->bundle;
	int ret;
	int finalize_ret;
	int num_given_buckets;

	num_given_buckets =
		uverbs_uattrs_process(num_uattrs, method_spec, pbundle);
	if (num_given_buckets <= 0)
		return -EINVAL;

	attr_bundle->num_buckets = num_given_buckets;
	ret = uverbs_validate_kernel_mandatory(method_spec, pbundle);
	if (ret)
		goto cleanup;

	/*
	 * We destroy the HW object before invoking the handler, handlers do
	 * not get to manipulate the HW objects.
	 */
	if (pbundle->destroy_attr) {
		ret = uobj_destroy(pbundle->destroy_attr->uobject);
		if (ret)
			goto cleanup;
	}

	ret = method_spec->handler(pbundle->bundle.ufile, attr_bundle);

	/* Drop the destroy ref regardless of the handler's result */
	if (pbundle->destroy_attr) {
		uobj_put_destroy(pbundle->destroy_attr->uobject);
		pbundle->destroy_attr->uobject = NULL;
	}

cleanup:
	/* Commit on success (!ret), abort on failure */
	finalize_ret = uverbs_finalize_attrs(pbundle,
					     method_spec->attr_buckets,
					     attr_bundle->num_buckets,
					     !ret);

	return ret ? ret : finalize_ret;
}

425 426 427 428 429 430 431 432 433 434 435 436
static void bundle_destroy(struct bundle_priv *pbundle)
{
	struct bundle_alloc_head *memblock;

	for (memblock = pbundle->allocated_mem; memblock;) {
		struct bundle_alloc_head *tmp = memblock;

		memblock = memblock->next;
		kvfree(tmp);
	}
}

/*
 * Core of the ioctl path: look up the object/method specs, size and lay
 * out the bundle_priv working area (on the stack when it fits, kmalloc'ed
 * otherwise), copy in the user attrs and run the method.
 */
static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
				struct ib_uverbs_file *file,
				struct ib_uverbs_ioctl_hdr *hdr,
				struct ib_uverbs_attr __user *user_attrs)
{
	const struct uverbs_object_spec *object_spec;
	const struct uverbs_method_spec *method_spec;
	long err = 0;
	unsigned int i;
	struct bundle_priv onstack_pbundle;
	struct bundle_priv *ctx;
	struct uverbs_attr *curr_attr;
	unsigned long *curr_bitmap;
	size_t ctx_size;

	if (hdr->driver_id != ib_dev->driver_id)
		return -EINVAL;

	object_spec = uverbs_get_object(file, hdr->object_id);
	if (!object_spec)
		return -EPROTONOSUPPORT;

	method_spec = uverbs_get_method(object_spec, hdr->method_id);
	if (!method_spec)
		return -EPROTONOSUPPORT;

	/*
	 * Total size: the fixed header (minus the internal buffer it
	 * overlaps), the per-bucket hash array, the kernel copy of the user
	 * attrs, the attr storage, and the valid bitmaps.
	 */
	ctx_size = sizeof(*ctx) - sizeof(ctx->internal_buffer) +
		   sizeof(struct uverbs_attr_bundle_hash) * method_spec->num_buckets +
		   sizeof(*ctx->uattrs) * hdr->num_attrs +
		   sizeof(*ctx->bundle.hash[0].attrs) *
		   method_spec->num_child_attrs +
		   sizeof(*ctx->bundle.hash[0].valid_bitmap) *
			(method_spec->num_child_attrs / BITS_PER_LONG +
			 method_spec->num_buckets);

	if (ctx_size <= sizeof(onstack_pbundle)) {
		ctx = &onstack_pbundle;
		ctx->internal_avail =
			sizeof(onstack_pbundle) -
			offsetof(struct bundle_priv, internal_buffer);
		ctx->allocated_mem = NULL;
	} else {
		ctx = kmalloc(ctx_size, GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;
		/* Chain ctx itself so bundle_destroy() frees it */
		ctx->internal_avail = 0;
		ctx->alloc_head.next = NULL;
		ctx->allocated_mem = &ctx->alloc_head;
	}

	/* Carve uattrs, attr storage and bitmaps out of the single block */
	ctx->uattrs = (void *)(ctx + 1) +
		      (sizeof(ctx->bundle.hash[0]) * method_spec->num_buckets);
	curr_attr = (void *)(ctx->uattrs + hdr->num_attrs);
	curr_bitmap = (void *)(curr_attr + method_spec->num_child_attrs);
	ctx->internal_used = ALIGN(ctx_size, sizeof(*ctx->internal_buffer));

	/*
	 * We just fill the pointers and num_attrs here. The data itself will be
	 * filled at a later stage (uverbs_process_attr)
	 */
	for (i = 0; i < method_spec->num_buckets; i++) {
		unsigned int curr_num_attrs;

		if (!method_spec->attr_buckets[i])
			continue;

		curr_num_attrs = method_spec->attr_buckets[i]->num_attrs;

		ctx->bundle.hash[i].attrs = curr_attr;
		curr_attr += curr_num_attrs;
		ctx->bundle.hash[i].num_attrs = curr_num_attrs;
		ctx->bundle.hash[i].valid_bitmap = curr_bitmap;
		bitmap_zero(curr_bitmap, curr_num_attrs);
		curr_bitmap += BITS_TO_LONGS(curr_num_attrs);
	}

	err = copy_from_user(ctx->uattrs, user_attrs,
			     sizeof(*ctx->uattrs) * hdr->num_attrs);
	if (err) {
		err = -EFAULT;
		goto out;
	}

	ctx->destroy_attr = NULL;
	ctx->bundle.ufile = file;
	ctx->user_attrs = user_attrs;
	err = uverbs_handle_method(hdr->num_attrs, method_spec, ctx);

	/*
	 * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
	 * not invoke the method because the request is not supported.  No
	 * other cases should return this code.
	*/
	if (unlikely(err == -EPROTONOSUPPORT)) {
		WARN_ON_ONCE(err == -EPROTONOSUPPORT);
		err = -EINVAL;
	}
out:
	bundle_destroy(ctx);
	return err;
}

#define IB_UVERBS_MAX_CMD_SZ 4096

/*
 * Entry point for the RDMA_VERBS_IOCTL ioctl. Validates the user header
 * and dispatches to ib_uverbs_cmd_verbs() under the device's SRCU read
 * lock so that a concurrent device disassociation is handled safely.
 */
long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_uverbs_ioctl_hdr __user *user_hdr =
		(struct ib_uverbs_ioctl_hdr __user *)arg;
	struct ib_uverbs_ioctl_hdr hdr;
	struct ib_device *ib_dev;
	int srcu_key;
	long err;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (!ib_dev) {
		/* Device was disassociated while this fd was open */
		err = -EIO;
		goto out;
	}

	if (cmd == RDMA_VERBS_IOCTL) {
		err = copy_from_user(&hdr, user_hdr, sizeof(hdr));

		/* hdr.length must exactly match the declared attr count */
		if (err || hdr.length > IB_UVERBS_MAX_CMD_SZ ||
		    hdr.length != sizeof(hdr) + hdr.num_attrs * sizeof(struct ib_uverbs_attr)) {
			err = -EINVAL;
			goto out;
		}

		/* Reserved fields must be zero for future extensibility */
		if (hdr.reserved1 || hdr.reserved2) {
			err = -EPROTONOSUPPORT;
			goto out;
		}

		err = ib_uverbs_cmd_verbs(ib_dev, file, &hdr, user_hdr->attrs);
	} else {
		err = -ENOIOCTLCMD;
	}
out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

	return err;
}
582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603

int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
		       size_t idx, u64 allowed_bits)
{
	const struct uverbs_attr *attr;
	u64 flags;

	attr = uverbs_attr_get(attrs_bundle, idx);
	/* Missing attribute means 0 flags */
	if (IS_ERR(attr)) {
		*to = 0;
		return 0;
	}

	/*
	 * New userspace code should use 8 bytes to pass flags, but we
	 * transparently support old userspaces that were using 4 bytes as
	 * well.
	 */
	if (attr->ptr_attr.len == 8)
		flags = attr->ptr_attr.data;
	else if (attr->ptr_attr.len == 4)
604
		flags = *(u32 *)&attr->ptr_attr.data;
605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632
	else
		return -EINVAL;

	if (flags & ~allowed_bits)
		return -EINVAL;

	*to = flags;
	return 0;
}
EXPORT_SYMBOL(uverbs_get_flags64);

/*
 * 32-bit convenience wrapper around uverbs_get_flags64(); rejects values
 * that do not fit in the caller's u32.
 */
int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle,
		       size_t idx, u64 allowed_bits)
{
	u64 flags64;
	int ret = uverbs_get_flags64(&flags64, attrs_bundle, idx,
				     allowed_bits);

	if (ret)
		return ret;

	if (flags64 > U32_MAX)
		return -EINVAL;

	*to = flags64;
	return 0;
}
EXPORT_SYMBOL(uverbs_get_flags32);
633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694

/*
 * This is for ease of conversion. The purpose is to convert all drivers to
 * use uverbs_attr_bundle instead of ib_udata.  Assume attr == 0 is input and
 * attr == 1 is output.
 */
void create_udata(struct uverbs_attr_bundle *bundle, struct ib_udata *udata)
{
	struct bundle_priv *pbundle =
		container_of(bundle, struct bundle_priv, bundle);
	const struct uverbs_attr *uhw_in =
		uverbs_attr_get(bundle, UVERBS_ATTR_UHW_IN);
	const struct uverbs_attr *uhw_out =
		uverbs_attr_get(bundle, UVERBS_ATTR_UHW_OUT);

	if (!IS_ERR(uhw_in)) {
		udata->inlen = uhw_in->ptr_attr.len;
		if (uverbs_attr_ptr_is_inline(uhw_in))
			udata->inbuf =
				&pbundle->user_attrs[uhw_in->ptr_attr.uattr_idx]
					 .data;
		else
			udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);
	} else {
		udata->inbuf = NULL;
		udata->inlen = 0;
	}

	if (!IS_ERR(uhw_out)) {
		udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);
		udata->outlen = uhw_out->ptr_attr.len;
	} else {
		udata->outbuf = NULL;
		udata->outlen = 0;
	}
}

/*
 * Copy @size bytes from @from into the PTR_OUT attribute @idx, truncating
 * to the buffer userspace provided, and flag the attribute as carrying
 * valid output in the user's attr array.
 */
int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
		   const void *from, size_t size)
{
	struct bundle_priv *pbundle =
		container_of(bundle, struct bundle_priv, bundle);
	const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
	size_t n;
	u16 out_flags;

	if (IS_ERR(attr))
		return PTR_ERR(attr);

	/* Never write past the buffer userspace gave us */
	n = min_t(size_t, attr->ptr_attr.len, size);
	if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, n))
		return -EFAULT;

	/* Tell userspace this attribute now holds valid output */
	out_flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
		    UVERBS_ATTR_F_VALID_OUTPUT;
	if (put_user(out_flags,
		     &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL(uverbs_copy_to);