/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"

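/*
 * MTT segments are managed by a buddy allocator: bits[o] is a bitmap of
 * free blocks of order o (each covering 2^o segments), and an order-o
 * request is satisfied from the smallest free block of order >= o,
 * splitting larger blocks as needed.  Returns the first segment of the
 * allocated block, or -1 (as a u32) if nothing large enough is free.
 */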
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}

static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc(buddy->max_order + 1, sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
		if (!buddy->bits[i]) {
			buddy->bits[i] = vzalloc(s * sizeof(long));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i]))
			vfree(buddy->bits[i]);
		else
			kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}

static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		if (is_vmalloc_addr(buddy->bits[i]))
			vfree(buddy->bits[i]);
		else
			kfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

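/*
 * MTT entries are grouped into segments of (1 << log_mtts_per_seg)
 * entries, so an order-n request (2^n MTT entries) becomes a buddy
 * allocation of order max(n - log_mtts_per_seg, 0) segments; the value
 * returned here is an MTT entry offset, not a segment number.
 */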
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	int seg_order;
	u32 offset;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}

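/*
 * In multi-function (SR-IOV) mode the range is not taken from the local
 * buddy directly: the request is sent through the wrapped ALLOC_RES
 * command so the resource tracker can perform and account the
 * allocation.
 */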
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
						       RES_OP_RESERVE_AND_MAP,
						       MLX4_CMD_ALLOC_RES,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		mtt->order      = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	} else
		mtt->page_shift = page_shift;

	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

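/*
 * A minimal usage sketch (error handling elided; npages and page_list
 * are caller-supplied): consumers size an MTT for a buffer, write the
 * DMA addresses into it, and later release the range:
 *
 *	struct mlx4_mtt mtt;
 *
 *	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &mtt);
 *	err = mlx4_write_mtt(dev, &mtt, 0, npages, page_list);
 *	...
 *	mlx4_mtt_cleanup(dev, &mtt);
 */
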
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u32 first_seg;
	int seg_order;
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
						       MLX4_CMD_FREE_RES,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);

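/*
 * A memory key is the 32-bit MPT hardware index rotated left by eight
 * bits: the byte that lands in the key's low bits acts as a tag that
 * the FMR remap path below can bump without moving to a different MPT
 * entry.
 */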
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova       = iova;
	mr->size       = size;
	mr->pd	       = pd;
	mr->access     = access;
	mr->enabled    = MLX4_MPT_DISABLED;
	mr->key	       = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A,  MLX4_CMD_WRAPPED);
}

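/*
 * The __mlx4_* helpers below act on local resources; their unprefixed
 * wrappers dispatch through the wrapped command interface instead when
 * the device is multi-function, so slaves never touch the bitmaps or
 * ICM tables directly.
 */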
int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mpt_reserve(dev);
}

void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
}

static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mpt_release(dev, index);
}

int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}

static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	u64 param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
							MLX4_CMD_ALLOC_RES,
							MLX4_CMD_TIME_CLASS_A,
							MLX4_CMD_WRAPPED);
	}
	return __mlx4_mpt_alloc_icm(dev, index);
}

void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mpt_free_icm(dev, index);
}

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mpt_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err) {
			mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
				  err);
			return err;
		}

		mr->enabled = MLX4_MPT_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);

	return 0;
}

int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int ret;

	ret = mlx4_mr_free_reserved(dev, mr);
	if (ret)
		return ret;
	if (mr->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mpt_release(dev, key_to_hw_index(mr->key));

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	memset(mpt_entry, 0, sizeof *mpt_entry);

	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO	 |
				       MLX4_MPT_FLAG_REGION	 |
				       mr->access);

	mpt_entry->key	       = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
						  &mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);

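/*
 * A minimal MR lifecycle sketch (error handling elided; pd, iova, size,
 * access, npages, page_shift and page_list are caller-supplied): the
 * MPT only becomes usable once mlx4_mr_enable() programs it into HW:
 *
 *	struct mlx4_mr mr;
 *
 *	err = mlx4_mr_alloc(dev, pd, iova, size, access, npages,
 *			    page_shift, &mr);
 *	err = mlx4_write_mtt(dev, &mr.mtt, 0, npages, page_list);
 *	err = mlx4_mr_enable(dev, &mr);
 *	...
 *	err = mlx4_mr_free(dev, &mr);
 */
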
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				npages * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   npages * sizeof (u64), DMA_TO_DEVICE);

	return 0;
}

int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how many mtts fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages      -= chunk;
		start_index += chunk;
		page_list   += chunk;

		chunk = min_t(int, mtts_per_page, npages);
	}
	return err;
}

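/*
 * For the wrapped WRITE_MTT path the mailbox layout is: inbox[0] holds
 * the starting MTT offset, inbox[1] is reserved, and up to
 * MLX4_MAILBOX_SIZE / sizeof(u64) - 2 big-endian MTT entries follow.
 */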
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
					       MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages      -= chunk;
			start_index += chunk;
			page_list   += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);

int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);

int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2((u32)dev->caps.num_mtts /
			      (1 << log_mtts_per_seg)));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %u is too small.\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}

void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

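/*
 * FMRs (fast memory regions) allow remapping without firmware commands:
 * the MPT is put in SW ownership by writing its status byte, the MTT
 * entries are rewritten in place through the ICM mapping, and the MPT
 * is handed back to HW with a new key tag (see mlx4_map_phys_fmr).
 */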
static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				  int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}

int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key    = cpu_to_be32(key);
	fmr->mpt->lkey   = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start  = cpu_to_be64(iova);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);

int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -ENOMEM;

	if (max_maps > dev->caps.max_fmr_maps)
		return -EINVAL;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages  = max_pages;
	fmr->max_maps   = max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);

	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	(void) mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);

int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				    key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);

void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	if (!fmr->maps)
		return;

	fmr->maps = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n",
		       err);
		return;
	}

	err = mlx4_HW2SW_MPT(dev, NULL,
			     key_to_hw_index(fmr->mr.key) &
			     (dev->caps.num_mpts - 1));
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err) {
		printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
		       err);
		return;
	}
	fmr->mr.enabled = MLX4_MPT_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	int ret;

	if (fmr->maps)
		return -EBUSY;

	ret = mlx4_mr_free(dev, &fmr->mr);
	if (ret)
		return ret;
	fmr->mr.enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);

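/*
 * SYNC_TPT flushes cached TPT (translation and protection table) state
 * after FMR updates; the bare 1000 below is a command timeout in
 * milliseconds rather than one of the MLX4_CMD_TIME_CLASS_* constants.
 */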
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
			MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);