/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
}

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

static bool use_umr(struct mlx5_ib_dev *dev, int order)
{
	return order <= mr_cache_max_order(dev) &&
		umr_can_modify_entity_size(dev);
}

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

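/* Map an MR order to its index in the MR cache entry array. */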
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR before reg_umr has finished, to ensure that the
		 * MR initialization has finished before we start
		 * handling invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif

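/*
 * Completion handler for the asynchronous mkey creation issued by
 * add_keys(): on success the new MR is added to its cache entry and
 * the mkey is inserted into the device's mkey radix tree.
 */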
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}

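/*
 * Asynchronously create 'num' mkeys for cache entry 'c', bounded by
 * MAX_PENDING_REG_MR outstanding requests per entry; completions are
 * handled by reg_mr_callback().
 */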
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
		MLX5_SET(mkc, mkc, access_mode_4_2,
			 (ent->access_mode >> 2) & 0x7);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task. Such a task is intended to be run
		 * when no other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user
		 * tasks to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_keys() and
		 * postpone the garbage collection work to try to run it
		 * in the next cycle, in order to free CPU resources to
		 * other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

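/*
 * Allocate an MR from a specific cache entry, triggering an asynchronous
 * fill and waiting for its completion if the entry's free list is empty.
 */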
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return NULL;
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}

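/*
 * Take an MR of at least 'order' from the cache, falling back to higher
 * order entries on a miss; returns NULL if nothing is currently cached.
 */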
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

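/*
 * Return a cache-allocated MR to its entry; the mkey is first disabled
 * with a UMR operation so it can be reused later.
 */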
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}

	if (unreg_umr(dev, mr))
		return;

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root || dev->rep)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name,  cache->root);
		if (!ent->dir)
			goto err;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			goto err;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			goto err;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			goto err;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			goto err;
	}

	return 0;
err:
	mlx5_mr_cache_debugfs_cleanup(dev);

	return -ENOMEM;
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	dev->fill_delay = 0;
}

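/*
 * Set up the MR cache: an ordered workqueue plus MAX_MR_CACHE_ENTRIES
 * entries of increasing order. Standard entries use MTT translation;
 * entries above MR_CACHE_LAST_STD_ENTRY are initialized for ODP.
 */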
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int err;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		init_completion(&ent->compl);
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		queue_work(cache->wq, &ent->work);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->rep &&
		    mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	/*
	 * We don't want to fail the driver if debugfs failed to
	 * initialize, so we are not forwarding the error to the user.
	 */

	return 0;
}

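/*
 * Wait, with bounded polling, for all asynchronous mkey creations issued
 * by add_keys() to complete before the cache is torn down.
 */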
static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int total = 0;
	int i;
	int j;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		for (j = 0 ; j < 1000; j++) {
			if (!ent->pending)
				break;
			msleep(50);
		}
	}
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		total += ent->pending;
	}

	if (total)
		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
	else
		mlx5_ib_warn(dev, "done with all pending requests\n");
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	if (!dev->cache.wq)
		return 0;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	wait_for_async_commands(dev);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}

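/*
 * Pin the user memory for [start, start + length) and report its page
 * count, best page shift, number of contiguous chunks and order.
 */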
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem,
		       int *npages, int *page_shift, int *ncont,
		       int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *u;
	int err;

	*umem = NULL;

	u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
	err = PTR_ERR_OR_ZERO(u);
	if (err) {
		mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
		return err;
	}

	mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
			   page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(u);
		return -EINVAL;
	}

	*umem = u;

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

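/*
 * Post a UMR work request on the dedicated UMR QP and sleep until its
 * completion is observed; returns -EFAULT if the WQE completed in error.
 */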
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	const struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}

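/*
 * Take an MR from the size-ordered cache for this registration and set up
 * its bookkeeping fields; the translation table is programmed later via
 * mlx5_ib_update_xlt().
 */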
static struct mlx5_ib_mr *alloc_mr_from_cache(
				  struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}

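/*
 * Fill one chunk of an mkey's translation table starting at 'idx': KLM
 * entries for indirect (ODP) mkeys, otherwise MTT entries built from the
 * umem page list.
 */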
static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		if (!umr_can_use_indirect_mkey(dev))
			return -EPERM;
		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
		return npages;
	}

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

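/*
 * Update a window of an mkey's translation table: descriptors are built in
 * a DMA-mapped scratch buffer and pushed to the HCA chunk by chunk with UMR
 * work requests.
 */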
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			       ? sizeof(struct mlx5_klm)
			       : sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	gfp_t gfp;
	bool use_emergency_page = false;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EPERM;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d, falling back to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
		size = PAGE_SIZE;
		memset(xlt, 0, size);
		use_emergency_page = true;
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (use_emergency_page)
		mlx5_ib_put_xlt_emergency_page();
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));
	}

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
					  u64 length, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MEMIC & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2,
		 (MLX5_MKC_ACCESS_MODE_MEMIC >> 2) & 0x7);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr,
		   memic_addr - pci_resource_start(dev->mdev->pdev, 0));

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	mr->umem = NULL;
	set_mr_fileds(dev, mr, 0, length, acc);

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	u64 memic_addr;

	if (attr->access_flags & ~MLX5_IB_DM_ALLOWED_ACCESS)
		return ERR_PTR(-EINVAL);

	memic_addr = mdm->dev_addr + attr->offset;

	return mlx5_ib_get_memic_mr(pd, memic_addr, attr->length,
				    attr->access_flags);
}

struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	bool populate_mtts = false;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (!start && length == U64_MAX) {
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}
#endif

	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
			   &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	if (use_umr(dev, order)) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
					 page_shift, order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
			mr = NULL;
		}
		populate_mtts = false;
	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		if (access_flags & IB_ACCESS_ON_DEMAND) {
			err = -EINVAL;
			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
			goto error;
		}
		populate_mtts = true;
	}

	if (!mr) {
		if (!umr_can_modify_entity_size(dev))
			populate_mtts = true;
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags, populate_mtts);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fileds(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	if (!populate_mtts) {
		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

		if (access_flags & IB_ACCESS_ON_DEMAND)
			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;

		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
					 update_xlt_flags);

		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	mr->live = 1;
#endif
	return &mr->ibmr;
error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_umr_wr umrwr = {};

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	return mlx5_ib_post_send_wait(dev, &umrwr);
}

static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr umrwr = {};
	int err;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
		umrwr.pd = pd;
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	}

	err = mlx5_ib_post_send_wait(dev, &umrwr);

	return err;
}

int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	int page_shift = 0;
	int upd_flags = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	u64 addr, len;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);

	if (!mr->umem)
		return -EINVAL;

	if (flags & IB_MR_REREG_TRANS) {
		addr = virt_addr;
		len = length;
	} else {
		addr = mr->umem->address;
		len = mr->umem->length;
	}

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = NULL;
		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
				  &npages, &page_shift, &ncont, &order);
		if (err)
			goto err;
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->allocated_from_cache)
			err = unreg_umr(dev, mr);
		else
			err = destroy_mkey(dev, mr);
		if (err)
			goto err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags, true);

		if (IS_ERR(mr)) {
			err = PTR_ERR(mr);
			mr = to_mmr(ib_mr);
			goto err;
		}

		mr->allocated_from_cache = 0;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		mr->live = 1;
#endif
	} else {
		/*
		 * Send a UMR WQE
		 */
		mr->ibmr.pd = pd;
		mr->access_flags = access_flags;
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
		mr->mmkey.pd = to_mpd(pd)->pdn;

		if (flags & IB_MR_REREG_TRANS) {
			upd_flags = MLX5_IB_UPD_XLT_ADDR;
			if (flags & IB_MR_REREG_PD)
				upd_flags |= MLX5_IB_UPD_XLT_PD;
			if (flags & IB_MR_REREG_ACCESS)
				upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
			err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
						 upd_flags);
		} else {
			err = rereg_umr(pd, mr, access_flags, flags);
		}

		if (err)
			goto err;
	}

	set_mr_fileds(dev, mr, npages, len, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif
	return 0;

err:
	if (mr->umem) {
		ib_umem_release(mr->umem);
		mr->umem = NULL;
	}
	clean_mr(dev, mr);
	return err;
}

static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dev.parent, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

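/*
 * Release the resources attached to an MR (signature PSVs, private
 * descriptors) and destroy its mkey unless it came from the MR cache.
 */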
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int allocated_from_cache = mr->allocated_from_cache;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!allocated_from_cache)
		destroy_mkey(dev, mr);
}

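/*
 * Full MR teardown: quiesce ODP page faults and invalidate the page
 * mappings if needed, release the umem, then free the MR or return it to
 * the cache.
 */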
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		mr->live = 0;
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		if (umem->odp_data->page_list)
			mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
						 ib_umem_end(umem));
		else
			mlx5_ib_free_implicit_mr(mr);
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight, looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}
#endif

	clean_mr(dev, mr);

	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}

	if (!mr->allocated_from_cache)
		kfree(mr);
	else
		mlx5_mr_cache_free(dev, mr);
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr));
	return 0;
}

struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_mtt));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(struct mlx5_mtt);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_klm));
		if (err)
			goto err_free_in;
		mr->desc_size = sizeof(struct mlx5_klm);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		MLX5_SET(mkc, mkc, bsf_en, 1);
		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET(mkc, mkc, access_mode_1_0, mr->access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mr->access_mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, umr_en, 1);

	mr->ibmr.device = pd->device;
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_destroy_psv;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	mw->ibmw.rkey = mw->mmkey.key;
	mw->ndescs = ndescs;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}

int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	err =  mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
				      &mmw->mmkey);
	if (!err)
		kfree(mmw);
	return err;
}

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}

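/*
 * Translate a scatterlist into KLM descriptors; used by
 * mlx5_ib_map_mr_sg() for mkeys in KLM access mode (e.g. IB_MR_TYPE_SG_GAPS).
 */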
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}
	mr->ndescs = i;

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	return i;
}

static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}