/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"
static
int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
		      struct iser_data_buf *mem,
		      struct iser_reg_resources *rsc,
		      struct iser_mem_reg *mem_reg);
static
int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
		     struct iser_data_buf *mem,
		     struct iser_reg_resources *rsc,
		     struct iser_mem_reg *mem_reg);

static struct iser_reg_ops fastreg_ops = {
	.alloc_reg_res	= iser_alloc_fastreg_pool,
	.free_reg_res	= iser_free_fastreg_pool,
	.reg_mem	= iser_fast_reg_mr,
	.unreg_mem	= iser_unreg_mem_fastreg,
	.reg_desc_get	= iser_reg_desc_get_fr,
	.reg_desc_put	= iser_reg_desc_put_fr,
};

static struct iser_reg_ops fmr_ops = {
	.alloc_reg_res	= iser_alloc_fmr_pool,
	.free_reg_res	= iser_free_fmr_pool,
	.reg_mem	= iser_fast_reg_fmr,
	.unreg_mem	= iser_unreg_mem_fmr,
	.reg_desc_get	= iser_reg_desc_get_fmr,
	.reg_desc_put	= iser_reg_desc_put_fmr,
};

int iser_assign_reg_ops(struct iser_device *device)
{
	struct ib_device_attr *dev_attr = &device->dev_attr;

	/* Assign function handles based on FMR support */
	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->reg_ops = &fmr_ops;
	} else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->reg_ops = &fastreg_ops;
	} else {
		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
		return -1;
	}

	return 0;
}

struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&fr_pool->lock, flags);
	desc = list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
	list_del(&desc->list);
	spin_unlock_irqrestore(&fr_pool->lock, flags);

	return desc;
}

void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
		     struct iser_fr_desc *desc)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	unsigned long flags;

	spin_lock_irqsave(&fr_pool->lock, flags);
	list_add(&desc->list, &fr_pool->list);
	spin_unlock_irqrestore(&fr_pool->lock, flags);
}

struct iser_fr_desc *
iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;

	return list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
}

void
iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
		      struct iser_fr_desc *desc)
{
}

#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
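/* e.g., IS_4K_ALIGNED(0x20003000) holds; IS_4K_ALIGNED(0x20003010) does not */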

/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (which may
 * be shorter than the original due to compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the weird case
 * where a few fragments of the same page are present in the SG as
 * consecutive elements, and it handles a single-entry SG as well.
 */
static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct ib_device *ibdev, u64 *pages,
			       int *offset, int *data_size)
{
	struct scatterlist *sg, *sgl = data->sg;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of first element */
	*offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page  = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* address of the first page in the contiguous chunk;
		   masking relevant for the very first SG entry,
		   which might be unaligned */
		page = chunk_start & MASK_4K;
		do {
			pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	*data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 *data_size, cur_page);
	return cur_page;
}
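
/*
 * Worked example for iser_sg_to_page_vec() above (illustrative only,
 * assuming 4K pages; the addresses are made up). An SG list of three
 * mapped elements with dma address/length pairs
 *	[0] 0x10000ff0/0x10   [1] 0x20000000/0x2000   [2] 0x30000000/0x800
 * yields *offset = 0xff0, *data_size = 0x2810 and the page vec
 *	{ 0x10000000, 0x20000000, 0x20001000, 0x30000000 }
 * i.e. 4 pages: element [0] ends on a page boundary, element [1] covers
 * two full pages, and the trailing fragment gets one page of its own.
 */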

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(data->sg, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n",i,(unsigned long)page_vec->pages[i]);
}

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			    struct iser_data_buf *data,
			    enum iser_data_dir iser_dir,
			    enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn.device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data,
			      enum dma_data_direction dir)
{
	struct ib_device *dev;

	dev = iser_task->iser_conn->ib_conn.device->ib_device;
	ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}
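
/*
 * Typical usage of the map/unmap pair above (a sketch, not a verbatim
 * caller): map the task data before registration and unmap it on task
 * completion, e.g. for a read command:
 *
 *	err = iser_dma_map_task_data(iser_task,
 *				     &iser_task->data[ISER_DIR_IN],
 *				     ISER_DIR_IN, DMA_FROM_DEVICE);
 *	...
 *	iser_dma_unmap_task_data(iser_task,
 *				 &iser_task->data[ISER_DIR_IN],
 *				 DMA_FROM_DEVICE);
 */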

static int
iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
	     struct iser_mem_reg *reg)
{
	struct scatterlist *sg = mem->sg;

	reg->sge.lkey = device->pd->local_dma_lkey;
	reg->rkey = device->mr->rkey;
	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);

	iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
		 reg->sge.addr, reg->sge.length);

	return 0;
}

/**
 * iser_fast_reg_fmr - Registers physical memory through the FMR pool
 *
 * returns: 0 on success, errno code on failure
 */
static
int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
		      struct iser_data_buf *mem,
		      struct iser_reg_resources *rsc,
		      struct iser_mem_reg *reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct iser_page_vec *page_vec = rsc->page_vec;
	struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
	struct ib_pool_fmr *fmr;
	int ret, plen;

	plen = iser_sg_to_page_vec(mem, device->ib_device,
				   page_vec->pages,
				   &page_vec->offset,
				   &page_vec->data_size);
	page_vec->length = plen;
	if (plen * SIZE_4K < page_vec->data_size) {
		iser_err("page vec too short to hold this SG\n");
		iser_data_buf_dump(mem, device->ib_device);
		iser_dump_page_vec(page_vec);
		return -EINVAL;
	}

	fmr  = ib_fmr_pool_map_phys(fmr_pool,
				    page_vec->pages,
				    page_vec->length,
				    page_vec->pages[0]);
	if (IS_ERR(fmr)) {
		ret = PTR_ERR(fmr);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
		return ret;
	}

	reg->sge.lkey = fmr->fmr->lkey;
	reg->rkey = fmr->fmr->rkey;
	reg->sge.addr = page_vec->pages[0] + page_vec->offset;
	reg->sge.length = page_vec->data_size;
	reg->mem_h = fmr;

	iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
		 reg->sge.addr, reg->sge.length);

	return 0;
}

/**
 * Unregister memory previously registered using FMR.
 * If the memory was not FMR-registered, this does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	int ret;

	if (!reg->mem_h)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];

	if (!reg->mem_h)
		return;

	device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
				     reg->mem_h);
	reg->mem_h = NULL;
}

static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
		    struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
	domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
	/*
	 * At the moment we hard code those, but in the future
	 * we will take them from sc.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
		domain->sig.dif.ref_remap = true;
}

static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
	switch (scsi_get_prot_op(sc)) {
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	default:
		iser_err("Unsupported PI operation %d\n",
			 scsi_get_prot_op(sc));
		return -EINVAL;
	}

	return 0;
}
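
/*
 * Summary of the mapping established above (mem domain / wire domain):
 *
 *	WRITE_INSERT, READ_STRIP:	NONE    / T10-DIF (CRC)
 *	READ_INSERT,  WRITE_STRIP:	T10-DIF / NONE
 *	READ_PASS,    WRITE_PASS:	T10-DIF / T10-DIF (CRC)
 *
 * The mem domain guard type follows SCSI_PROT_IP_CHECKSUM (CSUM vs. CRC).
 */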

static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
	*mask = 0;
	if (sc->prot_flags & SCSI_PROT_REF_CHECK)
		*mask |= ISER_CHECK_REFTAG;
	if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
		*mask |= ISER_CHECK_GUARD;
}

static void
iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->ex.invalidate_rkey = mr->rkey;
	inv_wr->send_flags = 0;
	inv_wr->num_sge = 0;

	/* bump the key portion of the rkey so the next registration
	 * hands out a fresh rkey that stale remote references won't match
	 */
	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}

static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
		struct iser_pi_context *pi_ctx,
		struct iser_mem_reg *data_reg,
		struct iser_mem_reg *prot_reg,
		struct iser_mem_reg *sig_reg)
{
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
	struct ib_sig_handover_wr *wr;
	int ret;

	memset(sig_attrs, 0, sizeof(*sig_attrs));
	ret = iser_set_sig_attrs(iser_task->sc, sig_attrs);
	if (ret)
		goto err;

	iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);

	if (!pi_ctx->sig_mr_valid)
		iser_inv_rkey(iser_tx_next_wr(tx_desc), pi_ctx->sig_mr);

	wr = sig_handover_wr(iser_tx_next_wr(tx_desc));
	wr->wr.opcode = IB_WR_REG_SIG_MR;
	wr->wr.wr_id = ISER_FASTREG_LI_WRID;
	wr->wr.sg_list = &data_reg->sge;
	wr->wr.num_sge = 1;
	wr->wr.send_flags = 0;
	wr->sig_attrs = sig_attrs;
	wr->sig_mr = pi_ctx->sig_mr;
	if (scsi_prot_sg_count(iser_task->sc))
		wr->prot = &prot_reg->sge;
	else
		wr->prot = NULL;
	wr->access_flags = IB_ACCESS_LOCAL_WRITE |
			   IB_ACCESS_REMOTE_READ |
			   IB_ACCESS_REMOTE_WRITE;
	pi_ctx->sig_mr_valid = 0;

	sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
	sig_reg->rkey = pi_ctx->sig_mr->rkey;
	sig_reg->sge.addr = 0;
	sig_reg->sge.length = scsi_transfer_length(iser_task->sc);

	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
		 sig_reg->sge.length);
err:
	return ret;
}

static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
			    struct iser_data_buf *mem,
			    struct iser_reg_resources *rsc,
			    struct iser_mem_reg *reg)
{
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct ib_mr *mr = rsc->mr;
	struct ib_reg_wr *wr;
	int n;

	if (!rsc->mr_valid)
		iser_inv_rkey(iser_tx_next_wr(tx_desc), mr);

	n = ib_map_mr_sg(mr, mem->sg, mem->size, SIZE_4K);
	if (unlikely(n != mem->size)) {
		iser_err("failed to map sg (%d/%d)\n",
			 n, mem->size);
		return n < 0 ? n : -EINVAL;
	}

	wr = reg_wr(iser_tx_next_wr(tx_desc));
	wr->wr.opcode = IB_WR_REG_MR;
	wr->wr.wr_id = ISER_FASTREG_LI_WRID;
	wr->wr.send_flags = 0;
	wr->wr.num_sge = 0;
	wr->mr = mr;
	wr->key = mr->rkey;
	wr->access = IB_ACCESS_LOCAL_WRITE  |
		     IB_ACCESS_REMOTE_WRITE |
		     IB_ACCESS_REMOTE_READ;

	rsc->mr_valid = 0;

	reg->sge.lkey = mr->lkey;
	reg->rkey = mr->rkey;
	reg->sge.addr = mr->iova;
	reg->sge.length = mr->length;

	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=0x%x\n",
		 reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length);

	return 0;
}
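
/*
 * Note on the fast registration flow above: the descriptor's MR is
 * reused across tasks. rsc->mr_valid is cleared once the MR has been
 * posted, so the next registration chains a LOCAL_INV work request in
 * front of the REG_MR to retire the old rkey first.
 */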

static int
iser_reg_prot_sg(struct iscsi_iser_task *task,
		 struct iser_data_buf *mem,
		 struct iser_fr_desc *desc,
		 bool use_dma_key,
		 struct iser_mem_reg *reg)
{
	struct iser_device *device = task->iser_conn->ib_conn.device;

	if (use_dma_key)
		return iser_reg_dma(device, mem, reg);

	return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
}

static int
iser_reg_data_sg(struct iscsi_iser_task *task,
		 struct iser_data_buf *mem,
		 struct iser_fr_desc *desc,
		 bool use_dma_key,
		 struct iser_mem_reg *reg)
{
	struct iser_device *device = task->iser_conn->ib_conn.device;

	if (use_dma_key)
		return iser_reg_dma(device, mem, reg);

	return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
}

int iser_reg_rdma_mem(struct iscsi_iser_task *task,
		      enum iser_data_dir dir)
{
	struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct iser_data_buf *mem = &task->data[dir];
	struct iser_mem_reg *reg = &task->rdma_reg[dir];
	struct iser_mem_reg *data_reg;
	struct iser_fr_desc *desc = NULL;
	bool use_dma_key;
	int err;

	use_dma_key = (mem->dma_nents == 1 && !iser_always_reg &&
		       scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL);

	if (!use_dma_key) {
		desc = device->reg_ops->reg_desc_get(ib_conn);
		reg->mem_h = desc;
	}

	if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL)
		data_reg = reg;
	else
		data_reg = &task->desc.data_reg;

	err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
	if (unlikely(err))
		goto err_reg;

	if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
		struct iser_mem_reg *prot_reg = &task->desc.prot_reg;

		if (scsi_prot_sg_count(task->sc)) {
			mem = &task->prot[dir];
			err = iser_reg_prot_sg(task, mem, desc,
					       use_dma_key, prot_reg);
			if (unlikely(err))
				goto err_reg;
		}

		err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg,
				      prot_reg, reg);
		if (unlikely(err))
			goto err_reg;

		desc->pi_ctx->sig_protected = 1;
	}

	return 0;

err_reg:
	if (desc)
		device->reg_ops->reg_desc_put(ib_conn, desc);

	return err;
}
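
/*
 * Registration flow of iser_reg_rdma_mem() above, as a sketch. With PI
 * enabled for the command:
 *
 *	reg_desc_get()		take a descriptor from the pool
 *	iser_reg_data_sg()	register the data SG
 *	iser_reg_prot_sg()	register the protection SG (if any)
 *	iser_reg_sig_mr()	bind both under the signature MR
 *
 * For a single-entry SG with no PI (and !iser_always_reg), the device's
 * local DMA lkey is used instead and no descriptor is consumed.
 */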

void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
			 enum iser_data_dir dir)
{
	struct iser_device *device = task->iser_conn->ib_conn.device;

	device->reg_ops->unreg_mem(task, dir);
}