/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"

/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Data size is stored in
 *  task->data[ISER_DIR_IN].data_len, Protection size
 *  is stored in task->prot[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd(struct iscsi_task *task)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_mem_reg *mem_reg;
	int err;
	struct iser_ctrl *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

	err = iser_dma_map_task_data(iser_task,
				     buf_in,
				     ISER_DIR_IN,
				     DMA_FROM_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_in,
					     ISER_DIR_IN,
					     DMA_FROM_DEVICE);
		if (err)
			return err;
	}

	err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN, false);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		return err;
	}
	mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];

	hdr->flags    |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(mem_reg->rkey);
	hdr->read_va   = cpu_to_be64(mem_reg->sge.addr);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 task->itt, mem_reg->rkey,
		 (unsigned long long)mem_reg->sge.addr);

	return 0;
}

/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Data size is stored in
 *  task->data[ISER_DIR_OUT].data_len, Protection size
 *  is stored in task->prot[ISER_DIR_OUT].data_len
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
		       unsigned int imm_sz,
		       unsigned int unsol_sz,
		       unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_mem_reg *mem_reg;
	int err;
	struct iser_ctrl *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
	struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

	err = iser_dma_map_task_data(iser_task,
				     buf_out,
				     ISER_DIR_OUT,
				     DMA_TO_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_out,
					     ISER_DIR_OUT,
					     DMA_TO_DEVICE);
		if (err)
			return err;
	}

	err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT,
				buf_out->data_len == imm_sz);
	if (err != 0) {
		iser_err("Failed to register write cmd RDMA mem\n");
		return err;
	}

	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];

	if (unsol_sz < edtl) {
		hdr->flags     |= ISER_WSV;
		hdr->write_stag = cpu_to_be32(mem_reg->rkey);
		hdr->write_va   = cpu_to_be64(mem_reg->sge.addr + unsol_sz);

		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
			 "VA:%#llX + unsol:%d\n",
			 task->itt, mem_reg->rkey,
			 (unsigned long long)mem_reg->sge.addr, unsol_sz);
	}

	if (imm_sz > 0) {
		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
			 task->itt, imm_sz);
		tx_dsg->addr = mem_reg->sge.addr;
		tx_dsg->length = imm_sz;
		tx_dsg->lkey = mem_reg->sge.lkey;
		iser_task->desc.num_sge = 2;
	}

	return 0;
}

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn	*iser_conn,
				  struct iser_tx_desc	*tx_desc)
{
	struct iser_device *device = iser_conn->ib_conn.device;

	ib_dma_sync_single_for_cpu(device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
	tx_desc->iser_header.flags = ISER_VER;
	tx_desc->num_sge = 1;
}

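/* Unmap and free the login request/response buffers (no-op if already freed) */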
static void iser_free_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iser_login_desc *desc = &iser_conn->login_desc;

	if (!desc->req)
		return;

	ib_dma_unmap_single(device->ib_device, desc->req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	kfree(desc->req);
	kfree(desc->rsp);

	/* make sure we never redo any unmapping */
	desc->req = NULL;
	desc->rsp = NULL;
}

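/* Allocate and DMA-map the login request/response buffers */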
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iser_login_desc *desc = &iser_conn->login_desc;

	desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
	if (!desc->req)
		return -ENOMEM;

	desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
					  ISCSI_DEF_MAX_RECV_SEG_LEN,
					  DMA_TO_DEVICE);
	if (ib_dma_mapping_error(device->ib_device,
				desc->req_dma))
		goto free_req;

	desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!desc->rsp)
		goto unmap_req;

	desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
					   ISER_RX_LOGIN_SIZE,
					   DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(device->ib_device,
				desc->rsp_dma))
		goto free_rsp;

	return 0;

free_rsp:
	kfree(desc->rsp);
unmap_req:
	ib_dma_unmap_single(device->ib_device, desc->req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN,
			    DMA_TO_DEVICE);
free_req:
	kfree(desc->req);

	return -ENOMEM;
}

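/* Allocate the connection's registration resources, login buffers and RX
 * descriptor array, and DMA-map each RX descriptor for receive completions.
 */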
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge       *rx_sg;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_conn->qp_max_recv_dtos = session->cmds_max;
	iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
	iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;

	if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max,
					   iser_conn->scsi_sg_tablesize))
		goto create_rdma_reg_res_failed;

	if (iser_alloc_login_buf(iser_conn))
		goto alloc_login_buf_fail;

	iser_conn->num_rx_descs = session->cmds_max;
	iser_conn->rx_descs = kmalloc(iser_conn->num_rx_descs *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!iser_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = iser_conn->rx_descs;

	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)  {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;
		rx_desc->cqe.done = iser_task_rsp;
		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;
	}

	iser_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	rx_desc = iser_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
	device->reg_ops->free_reg_res(ib_conn);
create_rdma_reg_res_failed:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}

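/* Release everything set up by iser_alloc_rx_descriptors() */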
void iser_free_rx_descriptors(struct iser_conn *iser_conn)
{
	int i;
	struct iser_rx_desc *rx_desc;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (device->reg_ops->free_reg_res)
		device->reg_ops->free_reg_res(ib_conn);

	rx_desc = iser_conn->rx_descs;
	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	/* make sure we never redo any unmapping */
	iser_conn->rx_descs = NULL;

	iser_free_login_buf(iser_conn);
}

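/* Post the initial batch of receive buffers once the login exchange moves
 * the connection to full feature phase. Discovery sessions keep re-using
 * the single login RX buffer instead.
 */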
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iscsi_session *session = conn->session;

	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
	/* check if this is the last login - going to full feature phase */
	if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
		return 0;

	/*
	 * Check that there is one posted recv buffer
	 * (for the last login response).
	 */
	WARN_ON(ib_conn->post_recv_buf_count != 1);

	if (session->discovery_sess) {
		iser_info("Discovery session, re-using login RX buffer\n");
		return 0;
	} else
		iser_info("Normal session, posting batch of RX %d buffers\n",
			  iser_conn->min_posted_rx);

	/* Initial post receive buffers */
	if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
		return -ENOMEM;

	return 0;
}

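/* Ask for a send completion only once every ISER_SIGNAL_CMD_COUNT commands */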
static inline bool iser_signal_comp(u8 sig_count)
{
	return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
}

/**
 * iser_send_command - send command PDU
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	unsigned long edtl;
	int err;
	struct iser_data_buf *data_buf, *prot_buf;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
	struct scsi_cmnd *sc  =  task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	u8 sig_count = ++iser_conn->ib_conn.sig_count;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	tx_desc->cqe.done = iser_cmd_comp;
	iser_create_send_desc(iser_conn, tx_desc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		data_buf = &iser_task->data[ISER_DIR_IN];
		prot_buf = &iser_task->prot[ISER_DIR_IN];
	} else {
		data_buf = &iser_task->data[ISER_DIR_OUT];
		prot_buf = &iser_task->prot[ISER_DIR_OUT];
	}

	if (scsi_sg_count(sc)) { /* using a scatter list */
		data_buf->sg = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}
	data_buf->data_len = scsi_bufflen(sc);

	if (scsi_prot_sg_count(sc)) {
		prot_buf->sg  = scsi_prot_sglist(sc);
		prot_buf->size = scsi_prot_sg_count(sc);
		prot_buf->data_len = (data_buf->data_len >>
				     ilog2(sc->device->sector_size)) * 8;
	}

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
					     task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(&iser_conn->ib_conn, tx_desc,
			     iser_signal_comp(sig_count));
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
	return err;
}

/**
 * iser_send_data_out - send data out PDU
 */
int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *tx_desc = NULL;
	struct iser_mem_reg *mem_reg;
	unsigned long buf_offset;
	unsigned long data_seg_len;
	uint32_t itt;
	int err;
	struct ib_sge *tx_dsg;

	itt = (__force uint32_t)hdr->itt;
	data_seg_len = ntoh24(hdr->dlength);
	buf_offset   = ntohl(hdr->offset);

	iser_dbg("%s itt %d dseg_len %d offset %d\n",
		 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

	tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
	if (tx_desc == NULL) {
		iser_err("Failed to alloc desc for post dataout\n");
		return -ENOMEM;
	}

	tx_desc->type = ISCSI_TX_DATAOUT;
	tx_desc->cqe.done = iser_dataout_comp;
	tx_desc->iser_header.flags = ISER_VER;
	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

	/* build the tx desc */
	err = iser_initialize_task_headers(task, tx_desc);
	if (err)
		goto send_data_out_error;

	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
	tx_dsg = &tx_desc->tx_sg[1];
	tx_dsg->addr = mem_reg->sge.addr + buf_offset;
	tx_dsg->length = data_seg_len;
	tx_dsg->lkey = mem_reg->sge.lkey;
	tx_desc->num_sge = 2;

	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Offset:%ld & DSL:%ld in Data-Out "
			 "inconsistent with total len:%ld, itt:%d\n",
			 buf_offset, data_seg_len,
			 iser_task->data[ISER_DIR_OUT].data_len, itt);
		err = -EINVAL;
		goto send_data_out_error;
	}
	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
		 itt, buf_offset, data_seg_len);

	err = iser_post_send(&iser_conn->ib_conn, tx_desc, true);
	if (!err)
		return 0;

send_data_out_error:
	kmem_cache_free(ig.desc_cache, tx_desc);
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

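/**
 * iser_send_control - send control PDU
 */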
int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *mdesc = &iser_task->desc;
	unsigned long data_seg_len;
	int err = 0;
	struct iser_device *device;

	/* build the tx desc regd header and add it to the tx desc dto */
	mdesc->type = ISCSI_TX_CONTROL;
	mdesc->cqe.done = iser_ctrl_comp;
	iser_create_send_desc(iser_conn, mdesc);

	device = iser_conn->ib_conn.device;

	data_seg_len = ntoh24(task->hdr->dlength);

	if (data_seg_len > 0) {
		struct iser_login_desc *desc = &iser_conn->login_desc;
		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];

		if (task != conn->login_task) {
			iser_err("data present on non login task!!!\n");
			goto send_control_error;
		}

		ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma,
					   task->data_count, DMA_TO_DEVICE);

		memcpy(desc->req, task->data, task->data_count);

		ib_dma_sync_single_for_device(device->ib_device, desc->req_dma,
					      task->data_count, DMA_TO_DEVICE);

		tx_dsg->addr = desc->req_dma;
		tx_dsg->length = task->data_count;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		mdesc->num_sge = 2;
	}

	if (task == conn->login_task) {
		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
			 task->hdr->opcode, data_seg_len);
		err = iser_post_recvl(iser_conn);
		if (err)
			goto send_control_error;
		err = iser_post_rx_bufs(conn, task->hdr);
		if (err)
			goto send_control_error;
	}

	err = iser_post_send(&iser_conn->ib_conn, mdesc, true);
	if (!err)
		return 0;

send_control_error:
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

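/* Login response receive completion: hand the PDU to libiscsi and account
 * for the consumed login RX buffer.
 */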
void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_login_desc *desc = iser_login(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	char *data;
	int length;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "login_rsp");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				   DMA_FROM_DEVICE);

	hdr = desc->rsp + sizeof(struct iser_ctrl);
	data = desc->rsp + ISER_HEADERS_LEN;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				      DMA_FROM_DEVICE);

	ib_conn->post_recv_buf_count--;
}

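/*
 * Remote invalidation support: if the target invalidated one of our rkeys
 * (IB_WC_WITH_INVALIDATE), mark the matching registration descriptor as
 * invalid so it is not invalidated again locally.
 */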
static inline void
iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
	if (likely(rkey == desc->rsc.mr->rkey))
		desc->rsc.mr_valid = 0;
	else if (likely(rkey == desc->pi_ctx->sig_mr->rkey))
		desc->pi_ctx->sig_mr_valid = 0;
}

static int
iser_check_remote_inv(struct iser_conn *iser_conn,
		      struct ib_wc *wc,
		      struct iscsi_hdr *hdr)
{
	if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
		struct iscsi_task *task;
		u32 rkey = wc->ex.invalidate_rkey;

		iser_dbg("conn %p: remote invalidation for rkey %#x\n",
			 iser_conn, rkey);

		if (unlikely(!iser_conn->snd_w_inv)) {
			iser_err("conn %p: unexepected remote invalidation, "
				 "terminating connection\n", iser_conn);
			return -EPROTO;
		}

		task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt);
		if (likely(task)) {
			struct iscsi_iser_task *iser_task = task->dd_data;
			struct iser_fr_desc *desc;

			if (iser_task->dir[ISER_DIR_IN]) {
				desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h;
				iser_inv_desc(desc, rkey);
			}

			if (iser_task->dir[ISER_DIR_OUT]) {
				desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h;
				iser_inv_desc(desc, rkey);
			}
		} else {
			iser_err("failed to get task for itt=%d\n", hdr->itt);
			return -EINVAL;
		}
	}

	return 0;
}


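/* Task response receive completion: hand the PDU to libiscsi and repost
 * receive buffers in batches when the number of posted buffers runs low.
 */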
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	int length;
	int outstanding, count, err;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "task_rsp");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				   DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	if (iser_check_remote_inv(iser_conn, wc, hdr)) {
		iscsi_conn_failure(iser_conn->iscsi_conn,
				   ISCSI_ERR_CONN_FAILED);
		return;
	}

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				      DMA_FROM_DEVICE);

	/*
	 * decrementing conn->post_recv_buf_count only --after-- freeing the
	 * task eliminates the need to worry on tasks which are completed in
	 * parallel to the execution of iser_conn_term. So the code that waits
	 * for the posted rx bufs refcount to become zero handles everything
	 */
	ib_conn->post_recv_buf_count--;

	outstanding = ib_conn->post_recv_buf_count;
	if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
		count = min(iser_conn->qp_max_recv_dtos - outstanding,
			    iser_conn->min_posted_rx);
		err = iser_post_recvm(iser_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
}

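/* SCSI command send completion: nothing to do unless the send failed */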
void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		iser_err_comp(wc, "command");
}

void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
	struct iscsi_task *task;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "control");
		return;
	}

	/* this arithmetic is legal by libiscsi dd_data allocation */
	task = (void *)desc - sizeof(struct iscsi_task);
	if (task->hdr->itt == RESERVED_ITT)
		iscsi_put_task(task);
}

void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_device *device = ib_conn->device;

	if (unlikely(wc->status != IB_WC_SUCCESS))
		iser_err_comp(wc, "dataout");

	ib_dma_unmap_single(device->ib_device, desc->dma_addr,
			    ISER_HEADERS_LEN, DMA_TO_DEVICE);
	kmem_cache_free(ig.desc_cache, desc);
}

void iser_last_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;

	complete(&ib_conn->last_comp);
}

void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
	iser_task->status = ISER_TASK_STATUS_INIT;

	iser_task->dir[ISER_DIR_IN] = 0;
	iser_task->dir[ISER_DIR_OUT] = 0;

	iser_task->data[ISER_DIR_IN].data_len  = 0;
	iser_task->data[ISER_DIR_OUT].data_len = 0;

	iser_task->prot[ISER_DIR_IN].data_len  = 0;
	iser_task->prot[ISER_DIR_OUT].data_len = 0;

	memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
	       sizeof(struct iser_mem_reg));
	memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
	       sizeof(struct iser_mem_reg));
}

void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	int prot_count = scsi_prot_sg_count(iser_task->sc);

	if (iser_task->dir[ISER_DIR_IN]) {
		iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
		iser_dma_unmap_task_data(iser_task,
					 &iser_task->data[ISER_DIR_IN],
					 DMA_FROM_DEVICE);
		if (prot_count)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_IN],
						 DMA_FROM_DEVICE);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
		iser_dma_unmap_task_data(iser_task,
					 &iser_task->data[ISER_DIR_OUT],
					 DMA_TO_DEVICE);
		if (prot_count)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_OUT],
						 DMA_TO_DEVICE);
	}
}