/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"

/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Data size is stored in
 * task->data[ISER_DIR_IN].data_len; protection size
 * is stored in task->prot[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd(struct iscsi_task *task)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_mem_reg *mem_reg;
	int err;
	struct iser_ctrl *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

	err = iser_dma_map_task_data(iser_task,
				     buf_in,
				     ISER_DIR_IN,
				     DMA_FROM_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_in,
					     ISER_DIR_IN,
					     DMA_FROM_DEVICE);
		if (err)
			return err;
	}

	err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN, false);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		return err;
	}
	mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];

	hdr->flags    |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(mem_reg->rkey);
	hdr->read_va   = cpu_to_be64(mem_reg->sge.addr);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 task->itt, mem_reg->rkey,
		 (unsigned long long)mem_reg->sge.addr);

	return 0;
}

/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Data size is stored in
 * task->data[ISER_DIR_OUT].data_len; protection size
 * is stored in task->prot[ISER_DIR_OUT].data_len
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
		       unsigned int imm_sz,
		       unsigned int unsol_sz,
		       unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_mem_reg *mem_reg;
	int err;
	struct iser_ctrl *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
	struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

	err = iser_dma_map_task_data(iser_task,
				     buf_out,
				     ISER_DIR_OUT,
				     DMA_TO_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_out,
					     ISER_DIR_OUT,
					     DMA_TO_DEVICE);
		if (err)
			return err;
	}

	err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT,
				buf_out->data_len == imm_sz);
	if (err != 0) {
		iser_err("Failed to register write cmd RDMA mem\n");
		return err;
	}

	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];

	if (unsol_sz < edtl) {
		hdr->flags     |= ISER_WSV;
		if (buf_out->data_len > imm_sz) {
			hdr->write_stag = cpu_to_be32(mem_reg->rkey);
			hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
		}

		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
			 "VA:%#llX + unsol:%d\n",
			 task->itt, mem_reg->rkey,
			 (unsigned long long)mem_reg->sge.addr, unsol_sz);
	}

	if (imm_sz > 0) {
		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
			 task->itt, imm_sz);
		tx_dsg->addr = mem_reg->sge.addr;
		tx_dsg->length = imm_sz;
		tx_dsg->lkey = mem_reg->sge.lkey;
		iser_task->desc.num_sge = 2;
	}

	return 0;
}

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn	*iser_conn,
				  struct iser_tx_desc	*tx_desc)
{
	struct iser_device *device = iser_conn->ib_conn.device;

	ib_dma_sync_single_for_cpu(device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
	tx_desc->iser_header.flags = ISER_VER;
	tx_desc->num_sge = 1;
}

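/* Unmap and free the login request/response buffers, if they were allocated */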
static void iser_free_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iser_login_desc *desc = &iser_conn->login_desc;

	if (!desc->req)
		return;

	ib_dma_unmap_single(device->ib_device, desc->req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	kfree(desc->req);
	kfree(desc->rsp);

	/* make sure we never redo any unmapping */
	desc->req = NULL;
	desc->rsp = NULL;
}

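/* Allocate and DMA-map the buffers used to exchange iSCSI login PDUs */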
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iser_login_desc *desc = &iser_conn->login_desc;

	desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
	if (!desc->req)
		return -ENOMEM;

	desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
					  ISCSI_DEF_MAX_RECV_SEG_LEN,
					  DMA_TO_DEVICE);
	if (ib_dma_mapping_error(device->ib_device,
				desc->req_dma))
		goto free_req;

	desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!desc->rsp)
		goto unmap_req;

	desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
					   ISER_RX_LOGIN_SIZE,
					   DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(device->ib_device,
				desc->rsp_dma))
		goto free_rsp;

	return 0;

free_rsp:
	kfree(desc->rsp);
unmap_req:
	ib_dma_unmap_single(device->ib_device, desc->req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN,
			    DMA_TO_DEVICE);
free_req:
	kfree(desc->req);

	return -ENOMEM;
}

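/**
 * iser_alloc_rx_descriptors - allocate connection RX resources
 * @iser_conn: iser connection context
 * @session: iscsi session
 *
 * Allocates the memory registration resources, the login buffers and an
 * array of cmds_max RX descriptors, and DMA-maps every descriptor for
 * receive. Returns 0 on success or -ENOMEM on failure.
 */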
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge       *rx_sg;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_conn->qp_max_recv_dtos = session->cmds_max;
	iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
	iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;

	if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max,
					   iser_conn->scsi_sg_tablesize))
		goto create_rdma_reg_res_failed;

	if (iser_alloc_login_buf(iser_conn))
		goto alloc_login_buf_fail;

	iser_conn->num_rx_descs = session->cmds_max;
	iser_conn->rx_descs = kmalloc_array(iser_conn->num_rx_descs,
					    sizeof(struct iser_rx_desc),
					    GFP_KERNEL);
	if (!iser_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = iser_conn->rx_descs;

	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)  {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;
		rx_desc->cqe.done = iser_task_rsp;
		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;
	}

	iser_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	rx_desc = iser_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
	device->reg_ops->free_reg_res(ib_conn);
create_rdma_reg_res_failed:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}

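/* Release everything that was set up by iser_alloc_rx_descriptors() */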
void iser_free_rx_descriptors(struct iser_conn *iser_conn)
{
	int i;
	struct iser_rx_desc *rx_desc;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (device->reg_ops->free_reg_res)
		device->reg_ops->free_reg_res(ib_conn);

	rx_desc = iser_conn->rx_descs;
	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	/* make sure we never redo any unmapping */
	iser_conn->rx_descs = NULL;

	iser_free_login_buf(iser_conn);
}

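/*
 * Called while sending a login PDU; once the final login request is
 * detected, post the initial batch of RX buffers for the full feature
 * phase (discovery sessions keep reusing the login RX buffer instead).
 */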
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iscsi_session *session = conn->session;

	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
	/* check if this is the last login - going to full feature phase */
	if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
		return 0;

	/*
	 * Check that there is one posted recv buffer
	 * (for the last login response).
	 */
	WARN_ON(ib_conn->post_recv_buf_count != 1);

	if (session->discovery_sess) {
		iser_info("Discovery session, re-using login RX buffer\n");
		return 0;
	} else
		iser_info("Normal session, posting batch of RX %d buffers\n",
			  iser_conn->min_posted_rx);

	/* Initial post receive buffers */
	if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
		return -ENOMEM;

	return 0;
}

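/* Request a signaled send completion once every ISER_SIGNAL_CMD_COUNT commands */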
static inline bool iser_signal_comp(u8 sig_count)
{
	return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
}

/**
 * iser_send_command - send command PDU
 * @conn: link to matching iscsi connection
 * @task: SCSI command task
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	unsigned long edtl;
	int err;
	struct iser_data_buf *data_buf, *prot_buf;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
	struct scsi_cmnd *sc = task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	u8 sig_count = ++iser_conn->ib_conn.sig_count;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	tx_desc->cqe.done = iser_cmd_comp;
	iser_create_send_desc(iser_conn, tx_desc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		data_buf = &iser_task->data[ISER_DIR_IN];
		prot_buf = &iser_task->prot[ISER_DIR_IN];
	} else {
		data_buf = &iser_task->data[ISER_DIR_OUT];
		prot_buf = &iser_task->prot[ISER_DIR_OUT];
	}

	if (scsi_sg_count(sc)) { /* using a scatter list */
		data_buf->sg = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}
	data_buf->data_len = scsi_bufflen(sc);

	if (scsi_prot_sg_count(sc)) {
		prot_buf->sg  = scsi_prot_sglist(sc);
		prot_buf->size = scsi_prot_sg_count(sc);
		prot_buf->data_len = (data_buf->data_len >>
				     ilog2(sc->device->sector_size)) * 8;
	}

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
				             task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(&iser_conn->ib_conn, tx_desc,
			     iser_signal_comp(sig_count));
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
	return err;
}

/**
 * iser_send_data_out - send data out PDU
 * @conn: link to matching iscsi connection
 * @task: SCSI command task
 * @hdr: pointer to the Data-Out PDU header
 */
int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *tx_desc;
	struct iser_mem_reg *mem_reg;
	unsigned long buf_offset;
	unsigned long data_seg_len;
	uint32_t itt;
	int err;
	struct ib_sge *tx_dsg;

	itt = (__force uint32_t)hdr->itt;
	data_seg_len = ntoh24(hdr->dlength);
	buf_offset   = ntohl(hdr->offset);

	iser_dbg("%s itt %d dseg_len %d offset %d\n",
		 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

	tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
	if (!tx_desc)
		return -ENOMEM;

	tx_desc->type = ISCSI_TX_DATAOUT;
	tx_desc->cqe.done = iser_dataout_comp;
	tx_desc->iser_header.flags = ISER_VER;
	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

	/* build the tx desc */
	err = iser_initialize_task_headers(task, tx_desc);
	if (err)
		goto send_data_out_error;

	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
	tx_dsg = &tx_desc->tx_sg[1];
	tx_dsg->addr = mem_reg->sge.addr + buf_offset;
	tx_dsg->length = data_seg_len;
	tx_dsg->lkey = mem_reg->sge.lkey;
	tx_desc->num_sge = 2;

	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Offset:%ld & DSL:%ld in Data-Out "
			 "inconsistent with total len:%ld, itt:%d\n",
			 buf_offset, data_seg_len,
			 iser_task->data[ISER_DIR_OUT].data_len, itt);
		err = -EINVAL;
		goto send_data_out_error;
	}
	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
		 itt, buf_offset, data_seg_len);

	err = iser_post_send(&iser_conn->ib_conn, tx_desc, true);
	if (!err)
		return 0;

send_data_out_error:
	kmem_cache_free(ig.desc_cache, tx_desc);
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

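/**
 * iser_send_control - send control PDU
 * @conn: link to matching iscsi connection
 * @task: iscsi task (e.g. login, text or nop-out)
 */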
int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *mdesc = &iser_task->desc;
	unsigned long data_seg_len;
	int err = 0;
	struct iser_device *device;

	/* build the tx desc regd header and add it to the tx desc dto */
	mdesc->type = ISCSI_TX_CONTROL;
	mdesc->cqe.done = iser_ctrl_comp;
	iser_create_send_desc(iser_conn, mdesc);

	device = iser_conn->ib_conn.device;

	data_seg_len = ntoh24(task->hdr->dlength);

	if (data_seg_len > 0) {
		struct iser_login_desc *desc = &iser_conn->login_desc;
		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];

		if (task != conn->login_task) {
			iser_err("data present on non login task!!!\n");
			goto send_control_error;
		}

		ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma,
					   task->data_count, DMA_TO_DEVICE);

		memcpy(desc->req, task->data, task->data_count);

		ib_dma_sync_single_for_device(device->ib_device, desc->req_dma,
					      task->data_count, DMA_TO_DEVICE);

		tx_dsg->addr = desc->req_dma;
		tx_dsg->length = task->data_count;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		mdesc->num_sge = 2;
	}

	if (task == conn->login_task) {
		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
			 task->hdr->opcode, data_seg_len);
		err = iser_post_recvl(iser_conn);
		if (err)
			goto send_control_error;
		err = iser_post_rx_bufs(conn, task->hdr);
		if (err)
			goto send_control_error;
	}

	err = iser_post_send(&iser_conn->ib_conn, mdesc, true);
	if (!err)
		return 0;

send_control_error:
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

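/* Receive completion handler for the login response buffer */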
void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_login_desc *desc = iser_login(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	char *data;
	int length;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "login_rsp");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				   DMA_FROM_DEVICE);

	hdr = desc->rsp + sizeof(struct iser_ctrl);
	data = desc->rsp + ISER_HEADERS_LEN;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				      DMA_FROM_DEVICE);

	ib_conn->post_recv_buf_count--;
}

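/* Mark the registration (or signature) MR matching rkey as invalidated */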
static inline void
iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
	if (likely(rkey == desc->rsc.mr->rkey))
		desc->rsc.mr_valid = 0;
	else if (likely(rkey == desc->pi_ctx->sig_mr->rkey))
		desc->pi_ctx->sig_mr_valid = 0;
}

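/*
 * Handle remote invalidation by the target: if the completion carries an
 * invalidated rkey, mark the matching task registration descriptors as
 * invalid. A remote invalidation we did not negotiate is a protocol error.
 */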
static int
iser_check_remote_inv(struct iser_conn *iser_conn,
		      struct ib_wc *wc,
		      struct iscsi_hdr *hdr)
{
	if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
		struct iscsi_task *task;
		u32 rkey = wc->ex.invalidate_rkey;

		iser_dbg("conn %p: remote invalidation for rkey %#x\n",
			 iser_conn, rkey);

		if (unlikely(!iser_conn->snd_w_inv)) {
			iser_err("conn %p: unexpected remote invalidation, "
				 "terminating connection\n", iser_conn);
			return -EPROTO;
		}

		task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt);
		if (likely(task)) {
			struct iscsi_iser_task *iser_task = task->dd_data;
			struct iser_fr_desc *desc;

			if (iser_task->dir[ISER_DIR_IN]) {
				desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h;
				iser_inv_desc(desc, rkey);
			}

			if (iser_task->dir[ISER_DIR_OUT]) {
				desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h;
				iser_inv_desc(desc, rkey);
			}
		} else {
			iser_err("failed to get task for itt=%d\n", hdr->itt);
			return -EINVAL;
		}
	}

	return 0;
}

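/*
 * Receive completion handler: hand the PDU to libiscsi and replenish the
 * RX queue once enough posted receive buffers have been consumed.
 */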
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	int length;
	int outstanding, count, err;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "task_rsp");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				   DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	if (iser_check_remote_inv(iser_conn, wc, hdr)) {
		iscsi_conn_failure(iser_conn->iscsi_conn,
				   ISCSI_ERR_CONN_FAILED);
		return;
	}

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				      DMA_FROM_DEVICE);

	/*
	 * Decrementing conn->post_recv_buf_count only --after-- freeing the
	 * task eliminates the need to worry about tasks which are completed
	 * in parallel to the execution of iser_conn_term. So the code that
	 * waits for the posted rx bufs refcount to become zero handles
	 * everything.
	 */
	ib_conn->post_recv_buf_count--;

	outstanding = ib_conn->post_recv_buf_count;
	if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
		count = min(iser_conn->qp_max_recv_dtos - outstanding,
			    iser_conn->min_posted_rx);
		err = iser_post_recvm(iser_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
}

void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		iser_err_comp(wc, "command");
}

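/*
 * Control PDU send completion: drop the task reference held for PDUs sent
 * with a reserved ITT (e.g. unsolicited NOP-Out).
 */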
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
	struct iscsi_task *task;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "control");
		return;
	}

	/* this arithmetic is valid because of the way libiscsi allocates dd_data */
	task = (void *)desc - sizeof(struct iscsi_task);
	if (task->hdr->itt == RESERVED_ITT)
		iscsi_put_task(task);
}

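/* Data-Out send completion: unmap the descriptor header and free the descriptor */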
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_device *device = ib_conn->device;

	if (unlikely(wc->status != IB_WC_SUCCESS))
		iser_err_comp(wc, "dataout");

	ib_dma_unmap_single(device->ib_device, desc->dma_addr,
			    ISER_HEADERS_LEN, DMA_TO_DEVICE);
	kmem_cache_free(ig.desc_cache, desc);
}

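/* Reset the per-task RDMA bookkeeping before a command is issued */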
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
	iser_task->status = ISER_TASK_STATUS_INIT;

	iser_task->dir[ISER_DIR_IN] = 0;
	iser_task->dir[ISER_DIR_OUT] = 0;

	iser_task->data[ISER_DIR_IN].data_len  = 0;
	iser_task->data[ISER_DIR_OUT].data_len = 0;

	iser_task->prot[ISER_DIR_IN].data_len  = 0;
	iser_task->prot[ISER_DIR_OUT].data_len = 0;

	memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
	       sizeof(struct iser_mem_reg));
	memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
	       sizeof(struct iser_mem_reg));
}

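/* Unregister and DMA-unmap the data and protection buffers of a finished task */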
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	int prot_count = scsi_prot_sg_count(iser_task->sc);

	if (iser_task->dir[ISER_DIR_IN]) {
		iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
		iser_dma_unmap_task_data(iser_task,
					 &iser_task->data[ISER_DIR_IN],
					 DMA_FROM_DEVICE);
		if (prot_count)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_IN],
						 DMA_FROM_DEVICE);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
		iser_dma_unmap_task_data(iser_task,
					 &iser_task->data[ISER_DIR_OUT],
					 DMA_TO_DEVICE);
		if (prot_count)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_OUT],
						 DMA_TO_DEVICE);
	}
}