/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"

/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Total data size is stored in
 *  iser_task->data[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd(struct iscsi_task *task,
				 unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_regd_buf *regd_buf;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

	err = iser_dma_map_task_data(iser_task,
				     buf_in,
				     ISER_DIR_IN,
				     DMA_FROM_DEVICE);
	if (err)
		return err;

	if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
		iser_err("Total data length: %ld, less than EDTL: "
			 "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
			 iser_task->data[ISER_DIR_IN].data_len, edtl,
			 task->itt, iser_task->iser_conn);
		return -EINVAL;
	}

	err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		return err;
	}
	regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];

	hdr->flags    |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
	hdr->read_va   = cpu_to_be64(regd_buf->reg.va);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 task->itt, regd_buf->reg.rkey,
		 (unsigned long long)regd_buf->reg.va);

	return 0;
}

/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Total data size is stored in
 *  iser_task->data[ISER_DIR_OUT].data_len
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
		       unsigned int imm_sz,
		       unsigned int unsol_sz,
		       unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_regd_buf *regd_buf;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
	struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

	err = iser_dma_map_task_data(iser_task,
				     buf_out,
				     ISER_DIR_OUT,
				     DMA_TO_DEVICE);
	if (err)
		return err;

	if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Total data length: %ld, less than EDTL: %d, "
			 "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
			 iser_task->data[ISER_DIR_OUT].data_len,
			 edtl, task->itt, task->conn);
		return -EINVAL;
	}

	err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
	if (err != 0) {
		iser_err("Failed to register write cmd RDMA mem\n");
		return err;
	}

	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];

	if (unsol_sz < edtl) {
		hdr->flags     |= ISER_WSV;
		hdr->write_stag = cpu_to_be32(regd_buf->reg.rkey);
		hdr->write_va   = cpu_to_be64(regd_buf->reg.va + unsol_sz);

		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
			 "VA:%#llX + unsol:%d\n",
			 task->itt, regd_buf->reg.rkey,
			 (unsigned long long)regd_buf->reg.va, unsol_sz);
	}

	if (imm_sz > 0) {
		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
			 task->itt, imm_sz);
		tx_dsg->addr   = regd_buf->reg.va;
		tx_dsg->length = imm_sz;
		tx_dsg->lkey   = regd_buf->reg.lkey;
		iser_task->desc.num_sge = 2;
	}

	return 0;
}

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn	*ib_conn,
				  struct iser_tx_desc	*tx_desc)
{
	struct iser_device *device = ib_conn->device;

	ib_dma_sync_single_for_cpu(device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;

	if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
		tx_desc->tx_sg[0].lkey = device->mr->lkey;
		iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
	}
}

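/**
 * iser_alloc_rx_descriptors - allocate and DMA-map the connection RX ring
 *
 * Allocates ISER_QP_MAX_RECV_DTOS receive descriptors and DMA-maps each
 * one; on a mapping failure, previously mapped descriptors are unmapped
 * and the ring is freed.
 */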
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge       *rx_sg;
	struct iser_device  *device = ib_conn->device;

	ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!ib_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = ib_conn->rx_descs;

	for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr   = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey   = device->mr->lkey;
	}

	ib_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	rx_desc = ib_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(ib_conn->rx_descs);
	ib_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}

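/**
 * iser_free_rx_descriptors - unmap and free the connection RX ring
 */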
void iser_free_rx_descriptors(struct iser_conn *ib_conn)
{
	int i;
	struct iser_rx_desc *rx_desc;
	struct iser_device *device = ib_conn->device;

	if (!ib_conn->rx_descs)
		return;

	rx_desc = ib_conn->rx_descs;
	for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(ib_conn->rx_descs);
}

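/* posts the initial batch of RX buffers when the connection moves to
 * full feature phase; discovery sessions keep re-using the single login
 * RX buffer instead
 */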
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iscsi_session *session = conn->session;

	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
	/* check if this is the last login - going to full feature phase */
	if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
		return 0;

	/*
	 * Check that there is one posted recv buffer (for the last login
	 * response) and no posted send buffers left - they must have been
	 * consumed during previous login phases.
	 */
	WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1);
	WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);

	if (session->discovery_sess) {
		iser_info("Discovery session, re-using login RX buffer\n");
		return 0;
	} else {
		iser_info("Normal session, posting batch of RX %d buffers\n",
			  ISER_MIN_POSTED_RX);
	}

	/* Initial post receive buffers */
	if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
		return -ENOMEM;

	return 0;
}

/**
 * iser_send_command - send command PDU
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	unsigned long edtl;
	int err;
	struct iser_data_buf *data_buf;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
	struct scsi_cmnd *sc = task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	iser_create_send_desc(iser_conn->ib_conn, tx_desc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ)
		data_buf = &iser_task->data[ISER_DIR_IN];
	else
		data_buf = &iser_task->data[ISER_DIR_OUT];

	if (scsi_sg_count(sc)) { /* using a scatter list */
		data_buf->buf  = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}

	data_buf->data_len = scsi_bufflen(sc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task, edtl);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
					     task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(iser_conn->ib_conn, tx_desc);
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
	return err;
}

/**
 * iser_send_data_out - send data out PDU
 */
int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *tx_desc = NULL;
	struct iser_regd_buf *regd_buf;
	unsigned long buf_offset;
	unsigned long data_seg_len;
	uint32_t itt;
	int err = 0;
	struct ib_sge *tx_dsg;

	itt = (__force uint32_t)hdr->itt;
	data_seg_len = ntoh24(hdr->dlength);
	buf_offset   = ntohl(hdr->offset);

	iser_dbg("%s itt %d dseg_len %d offset %d\n",
		 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

	tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
	if (tx_desc == NULL) {
		iser_err("Failed to alloc desc for post dataout\n");
		return -ENOMEM;
	}

	tx_desc->type = ISCSI_TX_DATAOUT;
	tx_desc->iser_header.flags = ISER_VER;
	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

	/* build the tx desc */
	iser_initialize_task_headers(task, tx_desc);

	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
	tx_dsg = &tx_desc->tx_sg[1];
	tx_dsg->addr    = regd_buf->reg.va + buf_offset;
	tx_dsg->length  = data_seg_len;
	tx_dsg->lkey    = regd_buf->reg.lkey;
	tx_desc->num_sge = 2;

	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Offset:%ld & DSL:%ld in Data-Out "
			 "inconsistent with total len:%ld, itt:%d\n",
			 buf_offset, data_seg_len,
			 iser_task->data[ISER_DIR_OUT].data_len, itt);
		err = -EINVAL;
		goto send_data_out_error;
	}
	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
		 itt, buf_offset, data_seg_len);

	err = iser_post_send(iser_conn->ib_conn, tx_desc);
	if (!err)
		return 0;

send_data_out_error:
	kmem_cache_free(ig.desc_cache, tx_desc);
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

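/**
 * iser_send_control - send control PDU
 */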
int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *mdesc = &iser_task->desc;
	unsigned long data_seg_len;
	int err = 0;
	struct iser_device *device;
	struct iser_conn *ib_conn = iser_conn->ib_conn;

	/* build the tx desc regd header and add it to the tx desc dto */
	mdesc->type = ISCSI_TX_CONTROL;
	iser_create_send_desc(iser_conn->ib_conn, mdesc);

	device = iser_conn->ib_conn->device;

	data_seg_len = ntoh24(task->hdr->dlength);

	if (data_seg_len > 0) {
		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];

		if (task != conn->login_task) {
			iser_err("data present on non login task!!!\n");
			err = -EINVAL;
			goto send_control_error;
		}

		ib_dma_sync_single_for_cpu(device->ib_device,
			ib_conn->login_req_dma, task->data_count,
			DMA_TO_DEVICE);

		memcpy(iser_conn->ib_conn->login_req_buf, task->data,
							task->data_count);

		ib_dma_sync_single_for_device(device->ib_device,
			ib_conn->login_req_dma, task->data_count,
			DMA_TO_DEVICE);

		tx_dsg->addr    = iser_conn->ib_conn->login_req_dma;
		tx_dsg->length  = task->data_count;
		tx_dsg->lkey    = device->mr->lkey;
		mdesc->num_sge = 2;
	}

	if (task == conn->login_task) {
		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
			 task->hdr->opcode, data_seg_len);
		err = iser_post_recvl(iser_conn->ib_conn);
		if (err)
			goto send_control_error;
		err = iser_post_rx_bufs(conn, task->hdr);
		if (err)
			goto send_control_error;
	}

	err = iser_post_send(iser_conn->ib_conn, mdesc);
	if (!err)
		return 0;

send_control_error:
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

/**
 * iser_rcv_completion - recv DTO completion
 */
void iser_rcv_completion(struct iser_rx_desc *rx_desc,
			 unsigned long rx_xfer_len,
			 struct iser_conn *ib_conn)
{
	struct iscsi_iser_conn *conn = ib_conn->iser_conn;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding, count, err;

	/* differentiate the login PDU from all other PDUs */
	if ((char *)rx_desc == ib_conn->login_resp_buf) {
		rx_dma = ib_conn->login_resp_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
	} else {
		rx_dma = rx_desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
			rx_buflen, DMA_FROM_DEVICE);

	hdr = &rx_desc->iscsi_header;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
			hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));

	iscsi_iser_recv(conn->iscsi_conn, hdr,
		rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
			rx_buflen, DMA_FROM_DEVICE);

	/*
	 * Decrementing conn->post_recv_buf_count only --after-- freeing the
	 * task eliminates the need to worry about tasks completing in
	 * parallel to the execution of iser_conn_term; the code that waits
	 * for the posted rx bufs refcount to reach zero then handles
	 * everything.
	 */
	conn->ib_conn->post_recv_buf_count--;

	if (rx_dma == ib_conn->login_resp_dma)
		return;

	outstanding = ib_conn->post_recv_buf_count;
	if (outstanding + ISER_MIN_POSTED_RX <= ISER_QP_MAX_RECV_DTOS) {
		count = min(ISER_QP_MAX_RECV_DTOS - outstanding,
						ISER_MIN_POSTED_RX);
		err = iser_post_recvm(ib_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
}

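/**
 * iser_snd_completion - send DTO completion
 */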
void iser_snd_completion(struct iser_tx_desc *tx_desc,
			struct iser_conn *ib_conn)
{
	struct iscsi_task *task;
	struct iser_device *device = ib_conn->device;

	if (tx_desc->type == ISCSI_TX_DATAOUT) {
		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
					ISER_HEADERS_LEN, DMA_TO_DEVICE);
		kmem_cache_free(ig.desc_cache, tx_desc);
	}

	atomic_dec(&ib_conn->post_send_buf_count);

	if (tx_desc->type == ISCSI_TX_CONTROL) {
		/* this arithmetic is legal by libiscsi dd_data allocation */
		task = (void *) ((long)(void *)tx_desc -
				  sizeof(struct iscsi_task));
		if (task->hdr->itt == RESERVED_ITT)
			iscsi_put_task(task);
	}
}

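/**
 * iser_task_rdma_init - reset the task RDMA state before I/O submission
 */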
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
	iser_task->status = ISER_TASK_STATUS_INIT;

	iser_task->dir[ISER_DIR_IN] = 0;
	iser_task->dir[ISER_DIR_OUT] = 0;

	iser_task->data[ISER_DIR_IN].data_len  = 0;
	iser_task->data[ISER_DIR_OUT].data_len = 0;

	memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
	       sizeof(struct iser_regd_buf));
	memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
	       sizeof(struct iser_regd_buf));
}

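/**
 * iser_task_rdma_finalize - release per-task RDMA resources on completion
 */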
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	int is_rdma_aligned = 1;
	struct iser_regd_buf *regd;

	/* if we were reading, copy back to the unaligned sglist,
	 * then dma_unmap and free the copy
	 */
	if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
		is_rdma_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
	}
	if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
		is_rdma_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
	}

	if (iser_task->dir[ISER_DIR_IN]) {
		regd = &iser_task->rdma_regd[ISER_DIR_IN];
		if (regd->reg.is_fmr)
			iser_unreg_mem(&regd->reg);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		regd = &iser_task->rdma_regd[ISER_DIR_OUT];
		if (regd->reg.is_fmr)
			iser_unreg_mem(&regd->reg);
	}

	/* if the data was unaligned, it was already unmapped and then copied */
	if (is_rdma_aligned)
		iser_dma_unmap_task_data(iser_task);
}