/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */

/**
 * iser_start_rdma_unaligned_sg - copy an unaligned SG into a bounce buffer
 *
 * Allocates a contiguous bounce buffer for the task, copies the payload
 * into it for write commands, and DMA-maps it as a single-entry SG.
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
					enum iser_data_dir cmd_dir)
{
	int dma_nents;
	struct ib_device *dev;
	char *mem = NULL;
	struct iser_data_buf *data = &iser_task->data[cmd_dir];
	unsigned long  cmd_data_len = data->data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		mem = (void *)__get_free_pages(GFP_ATOMIC,
		      ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		mem = kmalloc(cmd_data_len, GFP_ATOMIC);

	if (mem == NULL) {
		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
			 data->size, (int)cmd_data_len);
		return -ENOMEM;
	}

	if (cmd_dir == ISER_DIR_OUT) {
		/* copy the unaligned sg into the buffer which is used for RDMA */
		struct scatterlist *sgl = (struct scatterlist *)data->buf;
		struct scatterlist *sg;
		int i;
		char *p, *from;

		p = mem;
		for_each_sg(sgl, sg, data->size, i) {
			from = kmap_atomic(sg_page(sg));
			memcpy(p,
			       from + sg->offset,
			       sg->length);
			kunmap_atomic(from);
			p += sg->length;
		}
	}

	sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
	iser_task->data_copy[cmd_dir].buf  =
		&iser_task->data_copy[cmd_dir].sg_single;
	iser_task->data_copy[cmd_dir].size = 1;

	iser_task->data_copy[cmd_dir].copy_buf  = mem;

	dev = iser_task->iser_conn->ib_conn->device->ib_device;
	dma_nents = ib_dma_map_sg(dev,
				  &iser_task->data_copy[cmd_dir].sg_single,
				  1,
				  (cmd_dir == ISER_DIR_OUT) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
	BUG_ON(dma_nents == 0);

	iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
	return 0;
}

/**
 * iser_finalize_rdma_unaligned_sg - release a task's bounce buffer
 *
 * Unmaps the bounce buffer, copies its contents back to the original SG
 * for read commands, and frees it.
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     enum iser_data_dir         cmd_dir)
{
	struct ib_device *dev;
	struct iser_data_buf *mem_copy;
	unsigned long  cmd_data_len;

	dev = iser_task->iser_conn->ib_conn->device->ib_device;
	mem_copy = &iser_task->data_copy[cmd_dir];

	ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
			(cmd_dir == ISER_DIR_OUT) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN) {
		char *mem;
		struct scatterlist *sgl, *sg;
		unsigned char *p, *to;
		unsigned int sg_size;
		int i;

		/* copy the data read via RDMA back to the unaligned sg */
		mem	= mem_copy->copy_buf;

		sgl	= (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
		sg_size = iser_task->data[ISER_DIR_IN].size;

		p = mem;
		for_each_sg(sgl, sg, sg_size, i) {
			to = kmap_atomic(sg_page(sg));
			memcpy(to + sg->offset,
			       p,
			       sg->length);
			kunmap_atomic(to);
			p += sg->length;
		}
	}

	cmd_data_len = iser_task->data[cmd_dir].data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		free_pages((unsigned long)mem_copy->copy_buf,
			   ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		kfree(mem_copy->copy_buf);

	mem_copy->copy_buf = NULL;
}

#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)

/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be
 * less than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the weird case
 * where a few fragments of the same page are present in the SG as
 * consecutive elements. A single-entry SG is handled as well.
 */

static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct ib_device *ibdev, u64 *pages,
			       int *offset, int *data_size)
{
	struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of the first element */
	*offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page  = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* address of the first page in the contiguous chunk;
		   masking relevant for the very first SG entry,
		   which might be unaligned */
		page = chunk_start & MASK_4K;
		do {
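			/* emit a 4K page address for every page spanned
			 * by the closed chunk */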
			pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	*data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 *data_size, cur_page);
	return cur_page;
}


/**
 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
 * scatter-gather list of memory buffers which is correctly aligned for RDMA,
 * and returns the number of correctly aligned entries. Supports the case
 * where consecutive SG elements are actually fragments of the same physical
 * page.
 */
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
				      struct ib_device *ibdev)
{
	struct scatterlist *sgl, *sg, *next_sg = NULL;
	u64 start_addr, end_addr;
	int i, ret_len, start_check = 0;

	if (data->dma_nents == 1)
		return 1;

	sgl = (struct scatterlist *)data->buf;
	start_addr  = ib_sg_dma_address(ibdev, sgl);

	for_each_sg(sgl, sg, data->dma_nents, i) {
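		/* an element may begin or end off a 4K boundary only where
		 * it is physically contiguous with its neighbour; the first
		 * violation ends the aligned prefix */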
		if (start_check && !IS_4K_ALIGNED(start_addr))
			break;

		next_sg = sg_next(sg);
		if (!next_sg)
			break;

		end_addr    = start_addr + ib_sg_dma_len(ibdev, sg);
		start_addr  = ib_sg_dma_address(ibdev, next_sg);

		if (end_addr == start_addr) {
			start_check = 0;
			continue;
		} else
			start_check = 1;

		if (!IS_4K_ALIGNED(end_addr))
			break;
	}
	ret_len = (next_sg) ? i : i+1;
	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
		 ret_len, data->dma_nents, data);
	return ret_len;
}

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sgl = (struct scatterlist *)data->buf;
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n",i,(unsigned long)page_vec->pages[i]);
}

static void iser_page_vec_build(struct iser_data_buf *data,
				struct iser_page_vec *page_vec,
				struct ib_device *ibdev)
{
	int page_vec_len = 0;

	page_vec->length = 0;
	page_vec->offset = 0;

	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
	page_vec_len = iser_sg_to_page_vec(data, ibdev, page_vec->pages,
					   &page_vec->offset,
					   &page_vec->data_size);
	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);

	page_vec->length = page_vec_len;

	if (page_vec_len * SIZE_4K < page_vec->data_size) {
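		/* the built page_vec cannot cover the SG it was derived
		 * from; treat this as a fatal logic error */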
		iser_err("page_vec too short to hold this SG\n");
		iser_data_buf_dump(data, ibdev);
		iser_dump_page_vec(page_vec);
		BUG();
	}
}

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			    struct iser_data_buf *data,
			    enum iser_data_dir iser_dir,
			    enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn->device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
{
	struct ib_device *dev;
	struct iser_data_buf *data;

	dev = iser_task->iser_conn->ib_conn->device->ib_device;

	if (iser_task->dir[ISER_DIR_IN]) {
		data = &iser_task->data[ISER_DIR_IN];
		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		data = &iser_task->data[ISER_DIR_OUT];
		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
	}
}

static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
			      struct ib_device *ibdev,
			      enum iser_data_dir cmd_dir,
			      int aligned_len)
{
	struct iscsi_conn    *iscsi_conn = iser_task->iser_conn->iscsi_conn;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];

	iscsi_conn->fmr_unalign_cnt++;
	iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
		  aligned_len, mem->size);

	if (iser_debug_level > 0)
		iser_data_buf_dump(mem, ibdev);

	/* unmap the command data before accessing it */
	iser_dma_unmap_task_data(iser_task);

	/* allocate a copy buffer; if we are writing, copy the unaligned
	 * scatterlist into it, then dma map the copy */
	if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
		return -ENOMEM;

	return 0;
}

/**
 * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
 * using FMR (if possible) obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
			  enum iser_data_dir cmd_dir)
{
	struct iser_conn     *ib_conn = iser_task->iser_conn->ib_conn;
	struct iser_device   *device = ib_conn->device;
	struct ib_device     *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_regd_buf *regd_buf;
	int aligned_len;
	int err;
	int i;
	struct scatterlist *sg;

	regd_buf = &iser_task->rdma_regd[cmd_dir];

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
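		/* the SG is not fully 4K aligned, so it cannot be mapped as
		 * one virtually contiguous region; fall back to an aligned
		 * bounce buffer */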
		err = fall_to_bounce_buf(iser_task, ibdev,
					 cmd_dir, aligned_len);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
		mem = &iser_task->data_copy[cmd_dir];
	}

	/* if there is a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		sg = (struct scatterlist *)mem->buf;

		regd_buf->reg.lkey = device->mr->lkey;
		regd_buf->reg.rkey = device->mr->rkey;
		regd_buf->reg.len  = ib_sg_dma_len(ibdev, &sg[0]);
		regd_buf->reg.va   = ib_sg_dma_address(ibdev, &sg[0]);
		regd_buf->reg.is_mr = 0;

		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X  "
			 "va: 0x%08lX sz: %ld]\n",
			 (unsigned int)regd_buf->reg.lkey,
			 (unsigned int)regd_buf->reg.rkey,
			 (unsigned long)regd_buf->reg.va,
			 (unsigned long)regd_buf->reg.len);
	} else { /* use FMR for multiple dma entries */
		iser_page_vec_build(mem, ib_conn->fastreg.fmr.page_vec, ibdev);
		err = iser_reg_page_vec(ib_conn, ib_conn->fastreg.fmr.page_vec,
					&regd_buf->reg);
		if (err && err != -EAGAIN) {
			iser_data_buf_dump(mem, ibdev);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
				 mem->dma_nents,
				 ntoh24(iser_task->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
				 ib_conn->fastreg.fmr.page_vec->data_size,
				 ib_conn->fastreg.fmr.page_vec->length,
				 ib_conn->fastreg.fmr.page_vec->offset);
			for (i = 0; i < ib_conn->fastreg.fmr.page_vec->length; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long) ib_conn->fastreg.fmr.page_vec->pages[i]);
		}
		if (err)
			return err;
	}
	return 0;
}

static int iser_fast_reg_mr(struct fast_reg_descriptor *desc,
			    struct iser_conn *ib_conn,
			    struct iser_regd_buf *regd_buf,
			    u32 offset, unsigned int data_size,
			    unsigned int page_list_len)
{
	struct ib_send_wr fastreg_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	u8 key;
	int ret;

	if (!desc->valid) {
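		/* the MR still carries a previous registration: invalidate
		 * it and bump the rkey before reusing the descriptor */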
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.send_flags = IB_SEND_SIGNALED;
		inv_wr.ex.invalidate_rkey = desc->data_mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(desc->data_mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(desc->data_mr, ++key);
	}

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.send_flags = IB_SEND_SIGNALED;
	fastreg_wr.wr.fast_reg.iova_start = desc->data_frpl->page_list[0] + offset;
	fastreg_wr.wr.fast_reg.page_list = desc->data_frpl;
	fastreg_wr.wr.fast_reg.page_list_len = page_list_len;
	fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
	fastreg_wr.wr.fast_reg.length = data_size;
	fastreg_wr.wr.fast_reg.rkey = desc->data_mr->rkey;
	fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE  |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

	if (!wr) {
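		/* nothing to invalidate; post only the FASTREG WR */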
		wr = &fastreg_wr;
		atomic_inc(&ib_conn->post_send_buf_count);
	} else {
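		/* chain the FASTREG WR after the LOCAL_INV WR and post both */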
		wr->next = &fastreg_wr;
		atomic_add(2, &ib_conn->post_send_buf_count);
	}

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		if (bad_wr->next)
			atomic_sub(2, &ib_conn->post_send_buf_count);
		else
			atomic_dec(&ib_conn->post_send_buf_count);
		iser_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	desc->valid = false;

	regd_buf->reg.mem_h = desc;
	regd_buf->reg.lkey = desc->data_mr->lkey;
	regd_buf->reg.rkey = desc->data_mr->rkey;
	regd_buf->reg.va = desc->data_frpl->page_list[0] + offset;
	regd_buf->reg.len = data_size;
	regd_buf->reg.is_mr = 1;

	return ret;
}

/**
 * iser_reg_rdma_mem_frwr - Registers memory intended for RDMA,
 * using Fast Registration WR (if possible) obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_frwr(struct iscsi_iser_task *iser_task,
			   enum iser_data_dir cmd_dir)
{
	struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
	struct fast_reg_descriptor *desc;
	unsigned int data_size, page_list_len;
	int err, aligned_len;
	unsigned long flags;
	u32 offset;

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, ibdev,
					 cmd_dir, aligned_len);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
		mem = &iser_task->data_copy[cmd_dir];
	}

	/* if there is a single dma entry, the dma mr suffices */
	if (mem->dma_nents == 1) {
		struct scatterlist *sg = (struct scatterlist *)mem->buf;

		regd_buf->reg.lkey = device->mr->lkey;
		regd_buf->reg.rkey = device->mr->rkey;
		regd_buf->reg.len  = ib_sg_dma_len(ibdev, &sg[0]);
		regd_buf->reg.va   = ib_sg_dma_address(ibdev, &sg[0]);
		regd_buf->reg.is_mr = 0;
	} else {
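		/* multiple dma entries: take a fast_reg descriptor from the
		 * connection's pool */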
		spin_lock_irqsave(&ib_conn->lock, flags);
		desc = list_first_entry(&ib_conn->fastreg.frwr.pool,
					struct fast_reg_descriptor, list);
		list_del(&desc->list);
		spin_unlock_irqrestore(&ib_conn->lock, flags);
		page_list_len = iser_sg_to_page_vec(mem, device->ib_device,
						    desc->data_frpl->page_list,
						    &offset, &data_size);

		if (page_list_len * SIZE_4K < data_size) {
			iser_err("fast reg page_list too short to hold this SG\n");
			err = -EINVAL;
			goto err_reg;
		}

		err = iser_fast_reg_mr(desc, ib_conn, regd_buf,
				       offset, data_size, page_list_len);
		if (err)
			goto err_reg;
	}

	return 0;
err_reg:
	spin_lock_irqsave(&ib_conn->lock, flags);
	list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool);
	spin_unlock_irqrestore(&ib_conn->lock, flags);
	return err;
}