/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"

/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Total data size is stored in
 *  iser_task->data[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd(struct iscsi_task *task,
				 unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_device  *device = iser_task->iser_conn->ib_conn->device;
	struct iser_regd_buf *regd_buf;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

	err = iser_dma_map_task_data(iser_task,
				     buf_in,
				     ISER_DIR_IN,
				     DMA_FROM_DEVICE);
	if (err)
		return err;

	if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
		iser_err("Total data length: %ld, less than EDTL: "
			 "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
			 iser_task->data[ISER_DIR_IN].data_len, edtl,
			 task->itt, iser_task->iser_conn);
		return -EINVAL;
	}

	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		return err;
	}
	regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];

	hdr->flags    |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
	hdr->read_va   = cpu_to_be64(regd_buf->reg.va);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 task->itt, regd_buf->reg.rkey,
		 (unsigned long long)regd_buf->reg.va);

	return 0;
}

/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Total data size is stored in
 *  task->data[ISER_DIR_OUT].data_len
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
		       unsigned int imm_sz,
		       unsigned int unsol_sz,
		       unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_device  *device = iser_task->iser_conn->ib_conn->device;
	struct iser_regd_buf *regd_buf;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
	struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

	err = iser_dma_map_task_data(iser_task,
				     buf_out,
				     ISER_DIR_OUT,
				     DMA_TO_DEVICE);
	if (err)
		return err;

	if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Total data length: %ld, less than EDTL: %d, "
			 "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
			 iser_task->data[ISER_DIR_OUT].data_len,
			 edtl, task->itt, task->conn);
		return -EINVAL;
	}

	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
	if (err != 0) {
		iser_err("Failed to register write cmd RDMA mem\n");
		return err;
	}

	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];

	if (unsol_sz < edtl) {
		hdr->flags     |= ISER_WSV;
		hdr->write_stag = cpu_to_be32(regd_buf->reg.rkey);
		hdr->write_va   = cpu_to_be64(regd_buf->reg.va + unsol_sz);

		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
			 "VA:%#llX + unsol:%d\n",
			 task->itt, regd_buf->reg.rkey,
			 (unsigned long long)regd_buf->reg.va, unsol_sz);
	}

	if (imm_sz > 0) {
		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
			 task->itt, imm_sz);
		tx_dsg->addr   = regd_buf->reg.va;
		tx_dsg->length = imm_sz;
		tx_dsg->lkey   = regd_buf->reg.lkey;
		iser_task->desc.num_sge = 2;
	}

	return 0;
}

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn	*ib_conn,
				  struct iser_tx_desc	*tx_desc)
{
	struct iser_device *device = ib_conn->device;

	ib_dma_sync_single_for_cpu(device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;

	if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
		tx_desc->tx_sg[0].lkey = device->mr->lkey;
		iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
	}
}

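/* unmap and free the login request/response buffers (safe to call twice) */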
static void iser_free_login_buf(struct iser_conn *ib_conn)
{
	if (!ib_conn->login_buf)
		return;

	if (ib_conn->login_req_dma)
		ib_dma_unmap_single(ib_conn->device->ib_device,
				    ib_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	if (ib_conn->login_resp_dma)
		ib_dma_unmap_single(ib_conn->device->ib_device,
				    ib_conn->login_resp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	kfree(ib_conn->login_buf);

	/* make sure we never redo any unmapping */
	ib_conn->login_req_dma = 0;
	ib_conn->login_resp_dma = 0;
	ib_conn->login_buf = NULL;
}

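/* allocate the login buffer and DMA-map its request and response halves */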
static int iser_alloc_login_buf(struct iser_conn *ib_conn)
{
	struct iser_device	*device;
	int			req_err, resp_err;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;

	ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
				     ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!ib_conn->login_buf)
		goto out_err;

	ib_conn->login_req_buf  = ib_conn->login_buf;
	ib_conn->login_resp_buf = ib_conn->login_buf +
						ISCSI_DEF_MAX_RECV_SEG_LEN;

	ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
				(void *)ib_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
				(void *)ib_conn->login_resp_buf,
				ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	req_err  = ib_dma_mapping_error(device->ib_device,
					ib_conn->login_req_dma);
	resp_err = ib_dma_mapping_error(device->ib_device,
					ib_conn->login_resp_dma);

	if (req_err || resp_err) {
		if (req_err)
			ib_conn->login_req_dma = 0;
		if (resp_err)
			ib_conn->login_resp_dma = 0;
		goto free_login_buf;
	}
	return 0;

free_login_buf:
	iser_free_login_buf(ib_conn);

out_err:
	iser_err("unable to alloc or map login buf\n");
	return -ENOMEM;
}

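/**
 * iser_alloc_rx_descriptors - allocate and DMA-map the connection RX descriptors
 */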
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge       *rx_sg;
	struct iser_device  *device = ib_conn->device;

	ib_conn->qp_max_recv_dtos = session->cmds_max;
	ib_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
	ib_conn->min_posted_rx = ib_conn->qp_max_recv_dtos >> 2;

	if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
		goto create_rdma_reg_res_failed;

	if (iser_alloc_login_buf(ib_conn))
		goto alloc_login_buf_fail;

	ib_conn->rx_descs = kmalloc(session->cmds_max *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!ib_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = ib_conn->rx_descs;

	for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++)  {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr   = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey   = device->mr->lkey;
	}

	ib_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	rx_desc = ib_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(ib_conn->rx_descs);
	ib_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_free_login_buf(ib_conn);
alloc_login_buf_fail:
	device->iser_free_rdma_reg_res(ib_conn);
create_rdma_reg_res_failed:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}

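/**
 * iser_free_rx_descriptors - unmap and free the RX descriptors and login buffer
 */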
void iser_free_rx_descriptors(struct iser_conn *ib_conn)
{
	int i;
	struct iser_rx_desc *rx_desc;
	struct iser_device *device = ib_conn->device;

	if (!ib_conn->rx_descs)
		goto free_login_buf;

	if (device->iser_free_rdma_reg_res)
		device->iser_free_rdma_reg_res(ib_conn);

	rx_desc = ib_conn->rx_descs;
	for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(ib_conn->rx_descs);
	/* make sure we never redo any unmapping */
	ib_conn->rx_descs = NULL;

free_login_buf:
	iser_free_login_buf(ib_conn);
}

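/* once the last login PDU is sent, post the initial batch of RX buffers */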
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iscsi_session *session = conn->session;

	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
	/* check if this is the last login - going to full feature phase */
	if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
		return 0;

	/*
	 * Check that there is one posted recv buffer (for the last login
	 * response) and no posted send buffers left - they must have been
	 * consumed during previous login phases.
	 */
	WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1);
	WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);

	if (session->discovery_sess) {
		iser_info("Discovery session, re-using login RX buffer\n");
		return 0;
	} else
		iser_info("Normal session, posting batch of RX %d buffers\n",
			  iser_conn->ib_conn->min_posted_rx);

	/* Initial post receive buffers */
	if (iser_post_recvm(iser_conn->ib_conn,
			    iser_conn->ib_conn->min_posted_rx))
		return -ENOMEM;

	return 0;
}

/**
 * iser_send_command - send command PDU
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	unsigned long edtl;
	int err;
	struct iser_data_buf *data_buf;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
	struct scsi_cmnd *sc = task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	iser_create_send_desc(iser_conn->ib_conn, tx_desc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ)
		data_buf = &iser_task->data[ISER_DIR_IN];
	else
		data_buf = &iser_task->data[ISER_DIR_OUT];

	if (scsi_sg_count(sc)) { /* using a scatter list */
		data_buf->buf  = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}

	data_buf->data_len = scsi_bufflen(sc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task, edtl);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
					     task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(iser_conn->ib_conn, tx_desc);
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
	return err;
}

/**
 * iser_send_data_out - send data out PDU
 */
int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *tx_desc = NULL;
	struct iser_regd_buf *regd_buf;
	unsigned long buf_offset;
	unsigned long data_seg_len;
	uint32_t itt;
	int err = 0;
	struct ib_sge *tx_dsg;

	itt = (__force uint32_t)hdr->itt;
	data_seg_len = ntoh24(hdr->dlength);
	buf_offset   = ntohl(hdr->offset);

	iser_dbg("%s itt %d dseg_len %d offset %d\n",
		 __func__,(int)itt,(int)data_seg_len,(int)buf_offset);

O
Or Gerlitz 已提交
444
	tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
445 446 447 448 449 450
	if (tx_desc == NULL) {
		iser_err("Failed to alloc desc for post dataout\n");
		return -ENOMEM;
	}

	tx_desc->type = ISCSI_TX_DATAOUT;
	tx_desc->iser_header.flags = ISER_VER;
	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

	/* build the tx desc */
	iser_initialize_task_headers(task, tx_desc);

	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
	tx_dsg = &tx_desc->tx_sg[1];
	tx_dsg->addr    = regd_buf->reg.va + buf_offset;
	tx_dsg->length  = data_seg_len;
	tx_dsg->lkey    = regd_buf->reg.lkey;
	tx_desc->num_sge = 2;

	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Offset:%ld & DSL:%ld in Data-Out "
			 "inconsistent with total len:%ld, itt:%d\n",
			 buf_offset, data_seg_len,
			 iser_task->data[ISER_DIR_OUT].data_len, itt);
		err = -EINVAL;
		goto send_data_out_error;
	}
	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
		 itt, buf_offset, data_seg_len);

	err = iser_post_send(iser_conn->ib_conn, tx_desc);
	if (!err)
		return 0;

send_data_out_error:
	kmem_cache_free(ig.desc_cache, tx_desc);
	iser_err("conn %p failed err %d\n",conn, err);
	return err;
}

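/**
 * iser_send_control - send control PDU
 */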
int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *mdesc = &iser_task->desc;
	unsigned long data_seg_len;
	int err = 0;
	struct iser_device *device;
	struct iser_conn *ib_conn = iser_conn->ib_conn;

	/* build the tx desc regd header and add it to the tx desc dto */
	mdesc->type = ISCSI_TX_CONTROL;
	iser_create_send_desc(iser_conn->ib_conn, mdesc);

	device = iser_conn->ib_conn->device;

	data_seg_len = ntoh24(task->hdr->dlength);

	if (data_seg_len > 0) {
		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
		if (task != conn->login_task) {
			iser_err("data present on non login task!!!\n");
			goto send_control_error;
		}

		ib_dma_sync_single_for_cpu(device->ib_device,
			ib_conn->login_req_dma, task->data_count,
			DMA_TO_DEVICE);

		memcpy(iser_conn->ib_conn->login_req_buf, task->data,
							task->data_count);

		ib_dma_sync_single_for_device(device->ib_device,
			ib_conn->login_req_dma, task->data_count,
			DMA_TO_DEVICE);

		tx_dsg->addr    = iser_conn->ib_conn->login_req_dma;
		tx_dsg->length  = task->data_count;
		tx_dsg->lkey    = device->mr->lkey;
		mdesc->num_sge = 2;
	}

	if (task == conn->login_task) {
		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
			 task->hdr->opcode, data_seg_len);
		err = iser_post_recvl(iser_conn->ib_conn);
		if (err)
			goto send_control_error;
		err = iser_post_rx_bufs(conn, task->hdr);
		if (err)
			goto send_control_error;
	}

	err = iser_post_send(iser_conn->ib_conn, mdesc);
	if (!err)
		return 0;

send_control_error:
	iser_err("conn %p failed err %d\n",conn, err);
	return err;
}

/**
 * iser_rcv_completion - recv DTO completion
 */
void iser_rcv_completion(struct iser_rx_desc *rx_desc,
			 unsigned long rx_xfer_len,
			 struct iser_conn *ib_conn)
{
	struct iscsi_iser_conn *conn = ib_conn->iser_conn;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding, count, err;

	/* differentiate between login and all other PDUs */
	if ((char *)rx_desc == ib_conn->login_resp_buf) {
		rx_dma = ib_conn->login_resp_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
	} else {
		rx_dma = rx_desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
			rx_buflen, DMA_FROM_DEVICE);

	hdr = &rx_desc->iscsi_header;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
			hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));

	iscsi_iser_recv(conn->iscsi_conn, hdr,
		rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
			rx_buflen, DMA_FROM_DEVICE);

	/*
	 * Decrementing conn->post_recv_buf_count only --after-- freeing the
	 * task eliminates the need to worry about tasks which are completed
	 * in parallel to the execution of iser_conn_term, so the code that
	 * waits for the posted rx bufs refcount to become zero handles
	 * everything.
	 */
	conn->ib_conn->post_recv_buf_count--;

	if (rx_dma == ib_conn->login_resp_dma)
		return;

	outstanding = ib_conn->post_recv_buf_count;
	if (outstanding + ib_conn->min_posted_rx <= ib_conn->qp_max_recv_dtos) {
		count = min(ib_conn->qp_max_recv_dtos - outstanding,
						ib_conn->min_posted_rx);
		err = iser_post_recvm(ib_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
}

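/**
 * iser_snd_completion - send (TX) completion handler
 */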
void iser_snd_completion(struct iser_tx_desc *tx_desc,
			struct iser_conn *ib_conn)
{
	struct iscsi_task *task;
	struct iser_device *device = ib_conn->device;

	if (tx_desc->type == ISCSI_TX_DATAOUT) {
		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
					ISER_HEADERS_LEN, DMA_TO_DEVICE);
		kmem_cache_free(ig.desc_cache, tx_desc);
		tx_desc = NULL;
	}

	atomic_dec(&ib_conn->post_send_buf_count);

	if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
		/* this arithmetic is legal by libiscsi dd_data allocation */
		task = (void *) ((long)(void *)tx_desc -
				  sizeof(struct iscsi_task));
		if (task->hdr->itt == RESERVED_ITT)
			iscsi_put_task(task);
	}
}

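/**
 * iser_task_rdma_init - reset the per-task RDMA state before command execution
 */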
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
	iser_task->status = ISER_TASK_STATUS_INIT;

	iser_task->dir[ISER_DIR_IN] = 0;
	iser_task->dir[ISER_DIR_OUT] = 0;

	iser_task->data[ISER_DIR_IN].data_len  = 0;
	iser_task->data[ISER_DIR_OUT].data_len = 0;

	memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
	       sizeof(struct iser_regd_buf));
	memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
	       sizeof(struct iser_regd_buf));
}

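/**
 * iser_task_rdma_finalize - unregister and unmap the task RDMA resources
 */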
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	struct iser_device *device = iser_task->iser_conn->ib_conn->device;
	int is_rdma_data_aligned = 1;
	/* T10-PI (protection) state, consumed when unmapping the OUT buffers */
	int is_rdma_prot_aligned = 1;
	int prot_count = scsi_prot_sg_count(iser_task->sc);

	/* if we were reading, copy back to unaligned sglist,
	 * anyway dma_unmap and free the copy
	 */
	if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
		is_rdma_data_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->data[ISER_DIR_IN],
						&iser_task->data_copy[ISER_DIR_IN],
						ISER_DIR_IN);
	}

	if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
		is_rdma_data_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->data[ISER_DIR_OUT],
						&iser_task->data_copy[ISER_DIR_OUT],
						ISER_DIR_OUT);
	}

	if (iser_task->dir[ISER_DIR_IN]) {
		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
		if (is_rdma_data_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->data[ISER_DIR_IN]);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
		if (is_rdma_data_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->data[ISER_DIR_OUT]);
		if (prot_count && is_rdma_prot_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_OUT]);
	}
}