/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
}

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

static bool use_umr(struct mlx5_ib_dev *dev, int order)
{
	return order <= mr_cache_max_order(dev) &&
		umr_can_modify_entity_size(dev);
}

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (is_odp_mr(mr)) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR before reg_umr has finished, to ensure that the MR
		 * initialization has finished before we start handling
		 * invalidations.
		 */
		smp_wmb();
		to_ib_umem_odp(mr->umem)->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines before we can get page faults on the
		 * MR.  Page faults can happen once we put the MR in
		 * the tree, below this line.  Without the barrier,
		 * a page fault could be handled and an invalidation
		 * could run before umem->odp_data->private == mr is
		 * visible to the invalidation handler.
		 */
		smp_wmb();
	}
}

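/*
 * Completion callback for the asynchronous mkey creation issued by
 * add_keys().  On success the new MR is added to its cache bucket and
 * the mkey is inserted into the device's mkey radix tree; on failure
 * the MR is freed and fill_delay is armed to throttle further attempts.
 */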
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}

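/*
 * Asynchronously create up to @num cache mkeys for cache entry @c.
 * Returns -EAGAIN once MAX_PENDING_REG_MR requests are already in
 * flight for this entry; completions arrive in reg_mr_callback().
 */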
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
		MLX5_SET(mkc, mkc, access_mode_4_2,
			 (ent->access_mode >> 2) & 0x7);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

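/*
 * Remove up to @num MRs from cache entry @c and destroy their mkeys.
 * With ODP enabled, an SRCU grace period is observed before the MR
 * structures are freed so page-fault handlers cannot still use them.
 */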
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

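/*
 * Background worker that keeps each cache entry between limit and
 * 2 * limit MRs: it adds keys when the entry runs low (backing off on
 * errors) and lazily removes surplus keys when the system is idle.
 */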
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage collection
		 * task.  Such a task is intended to run when no other active
		 * processes are running.
		 *
		 * need_resched() returns TRUE if there are user tasks to be
		 * activated in the near future.
		 *
		 * In that case, we don't execute remove_keys() and postpone
		 * the garbage collection work to the next cycle, in order to
		 * free CPU resources for other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

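/*
 * Allocate an MR from a specific cache entry, replenishing the entry
 * and waiting on its completion if it is currently empty.
 */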
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return NULL;
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}

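/*
 * Take an MR of at least @order from the cache, scanning from the
 * best-fitting entry up to the last UMR-capable entry and kicking the
 * background worker for every bucket found empty along the way.
 */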
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

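/*
 * Return a cache-allocated MR to its bucket after invalidating it with
 * a UMR WQE; non-cache MRs are ignored here.  If the bucket grew beyond
 * 2 * limit, the background worker is queued to shrink it.
 */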
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	if (!mr->allocated_from_cache)
		return;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}

	if (unreg_umr(dev, mr))
		return;

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root || dev->rep)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name,  cache->root);
		if (!ent->dir)
			goto err;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			goto err;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			goto err;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			goto err;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			goto err;
	}

	return 0;
err:
	mlx5_mr_cache_debugfs_cleanup(dev);

	return -ENOMEM;
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	dev->fill_delay = 0;
}

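/*
 * Set up the MR cache: one ordered workqueue, a delay timer and
 * MAX_MR_CACHE_ENTRIES buckets of increasing order.  Entries above
 * MR_CACHE_LAST_STD_ENTRY are reserved for ODP; non-zero limits are
 * taken from the device profile only for PFs that are not representors.
 */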
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int err;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		init_completion(&ent->compl);
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->rep &&
		    mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
		queue_work(cache->wq, &ent->work);
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	/*
	 * We don't want to fail the driver if debugfs fails to initialize,
	 * so we are not forwarding the error to the user.
	 */

	return 0;
}

static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int total = 0;
	int i;
	int j;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		for (j = 0 ; j < 1000; j++) {
			if (!ent->pending)
				break;
			msleep(50);
		}
	}
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		total += ent->pending;
	}

	if (total)
		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
	else
		mlx5_ib_warn(dev, "done with all pending requests\n");
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	if (!dev->cache.wq)
		return 0;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	wait_for_async_commands(dev);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}

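/*
 * Pin the user memory for [start, start + length) and report how the
 * resulting umem can be mapped: number of pages, best page shift,
 * number of compound pages (ncont) and the matching cache order.
 */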
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem,
		       int *npages, int *page_shift, int *ncont,
		       int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *u;
	int err;

	*umem = NULL;

	u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
	err = PTR_ERR_OR_ZERO(u);
	if (err) {
		mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
		return err;
	}

	mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
			   page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(u);
		return -EINVAL;
	}

	*umem = u;

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

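/*
 * Post a UMR work request on the dedicated UMR QP and sleep until its
 * completion arrives; the semaphore bounds the number of outstanding
 * UMR operations.
 */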
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	const struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}

static struct mlx5_ib_mr *alloc_mr_from_cache(
				  struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}

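/*
 * Fill one chunk of the translation table (XLT): KLMs for indirect
 * (ODP) mkeys via mlx5_odp_populate_klm(), otherwise MTTs taken from
 * the umem.  Returns the number of entries written.
 */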
static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		if (!umr_can_use_indirect_mkey(dev))
			return -EPERM;
		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
		return npages;
	}

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

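/*
 * Push (part of) an MR's translation table to the HCA with UMR WQEs.
 * The table is staged in a DMA-mapped bounce buffer, falling back to a
 * smaller spare chunk and finally to the global emergency page when
 * higher-order allocations fail.
 */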
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			       ? sizeof(struct mlx5_klm)
			       : sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	gfp_t gfp;
	bool use_emergency_page = false;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EPERM;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
		size = PAGE_SIZE;
		memset(xlt, 0, size);
		use_emergency_page = true;
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (use_emergency_page)
		mlx5_ib_put_xlt_emergency_page();
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));
	}

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
					  u64 length, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MEMIC & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2,
		 (MLX5_MKC_ACCESS_MODE_MEMIC >> 2) & 0x7);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr,
		   memic_addr - pci_resource_start(dev->mdev->pdev, 0));

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	mr->umem = NULL;
	set_mr_fields(dev, mr, 0, length, acc);

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs)
{
	if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE)
		return -EOPNOTSUPP;

	return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
					 sg_list, num_sge);
}

struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	u64 memic_addr;

	if (attr->access_flags & ~MLX5_IB_DM_ALLOWED_ACCESS)
		return ERR_PTR(-EINVAL);

	memic_addr = mdm->dev_addr + attr->offset;

	return mlx5_ib_get_memic_mr(pd, memic_addr, attr->length,
				    attr->access_flags);
}

struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	bool populate_mtts = false;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (!start && length == U64_MAX) {
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}
#endif

	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
			   &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	if (use_umr(dev, order)) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
					 page_shift, order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
			mr = NULL;
		}
		populate_mtts = false;
	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		if (access_flags & IB_ACCESS_ON_DEMAND) {
			err = -EINVAL;
			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
			goto error;
		}
		populate_mtts = true;
	}

	if (!mr) {
		if (!umr_can_modify_entity_size(dev))
			populate_mtts = true;
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags, populate_mtts);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

	update_odp_mr(mr);

	if (!populate_mtts) {
		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

		if (access_flags & IB_ACCESS_ON_DEMAND)
			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;

		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
					 update_xlt_flags);

		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	mr->live = 1;
#endif
	return &mr->ibmr;
error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

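/*
 * Invalidate an mkey with a UMR WQE so it can be returned to the cache.
 * Skipped (treated as success) when the device is in an internal error
 * state, since no UMR can be posted then.
 */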
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_umr_wr umrwr = {};

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	return mlx5_ib_post_send_wait(dev, &umrwr);
}

static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr umrwr = {};
	int err;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
		umrwr.pd = pd;
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	}

	err = mlx5_ib_post_send_wait(dev, &umrwr);

	return err;
}

int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	int page_shift = 0;
	int upd_flags = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	u64 addr, len;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);

	if (!mr->umem)
		return -EINVAL;

	if (flags & IB_MR_REREG_TRANS) {
		addr = virt_addr;
		len = length;
	} else {
		addr = mr->umem->address;
		len = mr->umem->length;
	}

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = NULL;
		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
				  &npages, &page_shift, &ncont, &order);
		if (err)
			goto err;
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->allocated_from_cache)
			err = unreg_umr(dev, mr);
		else
			err = destroy_mkey(dev, mr);
		if (err)
			goto err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags, true);

		if (IS_ERR(mr)) {
			err = PTR_ERR(mr);
			mr = to_mmr(ib_mr);
			goto err;
		}

		mr->allocated_from_cache = 0;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		mr->live = 1;
#endif
	} else {
		/*
		 * Send a UMR WQE
		 */
		mr->ibmr.pd = pd;
		mr->access_flags = access_flags;
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
		mr->mmkey.pd = to_mpd(pd)->pdn;

		if (flags & IB_MR_REREG_TRANS) {
			upd_flags = MLX5_IB_UPD_XLT_ADDR;
			if (flags & IB_MR_REREG_PD)
				upd_flags |= MLX5_IB_UPD_XLT_PD;
			if (flags & IB_MR_REREG_ACCESS)
				upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
			err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
						 upd_flags);
		} else {
			err = rereg_umr(pd, mr, access_flags, flags);
		}

		if (err)
			goto err;
	}

	set_mr_fields(dev, mr, npages, len, access_flags);

	update_odp_mr(mr);
	return 0;

err:
	if (mr->umem) {
		ib_umem_release(mr->umem);
		mr->umem = NULL;
	}
	clean_mr(dev, mr);
	return err;
}

static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dev.parent, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

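/*
 * Release everything attached to an MR except its umem: signature PSVs,
 * private descriptors and, for non-cache MRs, the mkey itself.
 */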
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int allocated_from_cache = mr->allocated_from_cache;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!allocated_from_cache)
		destroy_mkey(dev, mr);
}

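/*
 * Full MR teardown.  For ODP MRs the umem is dismantled first, with the
 * live flag cleared and an SRCU grace period observed, so that no
 * page-fault or invalidation handler can still reference the MR.
 */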
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

	if (is_odp_mr(mr)) {
		struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);

		/* Prevent new page faults from succeeding */
		mr->live = 0;
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		if (umem_odp->page_list)
			mlx5_ib_invalidate_range(umem_odp, ib_umem_start(umem),
						 ib_umem_end(umem));
		else
			mlx5_ib_free_implicit_mr(mr);
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight still looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}

	clean_mr(dev, mr);

	/*
	 * We should unregister the DMA address from the HCA before
	 * removing the DMA mapping.
	 */
	mlx5_mr_cache_free(dev, mr);
	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}
	if (!mr->allocated_from_cache)
		kfree(mr);
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr));
	return 0;
}

struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_mtt));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(struct mlx5_mtt);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_klm));
		if (err)
			goto err_free_in;
		mr->desc_size = sizeof(struct mlx5_klm);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		MLX5_SET(mkc, mkc, bsf_en, 1);
		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET(mkc, mkc, access_mode_1_0, mr->access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mr->access_mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, umr_en, 1);

	mr->ibmr.device = pd->device;
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_destroy_psv;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	mw->ibmw.rkey = mw->mmkey.key;
	mw->ndescs = ndescs;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}

int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	err =  mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
				      &mmw->mmkey);
	if (!err)
		kfree(mmw);
	return err;
}

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}

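/*
 * Translate a scatterlist into KLM descriptors for a KLM-based MR,
 * honouring an initial offset into the first entry and capping the
 * count at max_descs.
 */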
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}
	mr->ndescs = i;

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	return i;
}

static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}