/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_CQ_LEN	(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got cq event %d \n", cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got qp event %d\n",cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	iser_err("async event %d on device %s port %d\n", event->event,
		event->device->name, event->element.port_num);
}

/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adaptor.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct iser_cq_desc *cq_desc;
	struct ib_device_attr *dev_attr = &device->dev_attr;
	int ret, i, j;

	ret = ib_query_device(device->ib_device, dev_attr);
	if (ret) {
		pr_warn("Query device failed for %s\n", device->ib_device->name);
		return ret;
	}

	/* Assign function handles  - based on FMR support */
	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
		device->iser_free_rdma_reg_res = iser_free_fmr_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
	} else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool;
		device->iser_free_rdma_reg_res = iser_free_fastreg_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg;
	} else {
		iser_err("IB device supports neither FMRs nor FastRegs, can't register memory\n");
		return -1;
	}

	device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
	iser_info("using %d CQs, device %s supports %d vectors\n",
		  device->cqs_used, device->ib_device->name,
		  device->ib_device->num_comp_vectors);
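	/* one rx/tx CQ pair is created per completion vector below, spreading
	 * interrupt load across vectors; each QP is later attached to the
	 * least loaded pair (see iser_create_ib_conn_res) */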

	device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
				  GFP_KERNEL);
	if (device->cq_desc == NULL)
		goto cq_desc_err;
	cq_desc = device->cq_desc;

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device   = device;
		cq_desc[i].cq_index = i;

		device->rx_cq[i] = ib_create_cq(device->ib_device,
					  iser_cq_callback,
					  iser_cq_event_callback,
					  (void *)&cq_desc[i],
					  ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(device->rx_cq[i]))
			goto cq_err;
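		/* the tx CQ needs no completion handler; tx completions are
		 * reaped by polling from the rx CQ tasklet, see
		 * iser_drain_tx_cq() */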
		device->tx_cq[i] = ib_create_cq(device->ib_device,
					  NULL, iser_cq_event_callback,
					  (void *)&cq_desc[i],
					  ISER_MAX_TX_CQ_LEN, i);
		if (IS_ERR(device->tx_cq[i]))
			goto cq_err;
		if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
			goto cq_err;
		tasklet_init(&device->cq_tasklet[i],
			     iser_cq_tasklet_fn,
			(unsigned long)&cq_desc[i]);
	}
	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
				iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	for (j = 0; j < device->cqs_used; j++)
		tasklet_kill(&device->cq_tasklet[j]);
cq_err:
	for (j = 0; j < i; j++) {
		if (device->tx_cq[j])
			ib_destroy_cq(device->tx_cq[j]);
		if (device->rx_cq[j])
			ib_destroy_cq(device->rx_cq[j]);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	kfree(device->cq_desc);
cq_desc_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adaptor.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;
	BUG_ON(device->mr == NULL);

	for (i = 0; i < device->cqs_used; i++) {
		tasklet_kill(&device->cq_tasklet[i]);
		(void)ib_destroy_cq(device->tx_cq[i]);
		(void)ib_destroy_cq(device->rx_cq[i]);
		device->tx_cq[i] = NULL;
		device->rx_cq[i] = NULL;
	}

	(void)ib_unregister_event_handler(&device->event_handler);
	(void)ib_dereg_mr(device->mr);
	(void)ib_dealloc_pd(device->pd);

	kfree(device->cq_desc);

	device->mr = NULL;
	device->pd = NULL;
}

/**
 * iser_create_fmr_pool - Creates FMR pool and page_vector
 *
 * returns 0 on success, or errno code on failure
 */
int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct ib_fmr_pool_param params;
	int ret = -ENOMEM;

	ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
					(sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
					GFP_KERNEL);
	if (!ib_conn->fmr.page_vec)
		return ret;
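
	/* page_vec and its page array are carved from a single allocation:
	 * the u64 array starts right after the struct, hence the pointer
	 * arithmetic below */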
	ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);

	params.page_shift        = SHIFT_4K;
	/* when the first/last SG element are not start/end *
	 * page aligned, the map would be of N+1 pages      */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50%  */
	params.pool_size	 = cmds_max * 2;
	params.dirty_watermark	 = cmds_max;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
	if (!IS_ERR(ib_conn->fmr.pool))
		return 0;

	/* no FMR => no need for page_vec */
	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;

	ret = PTR_ERR(ib_conn->fmr.pool);
	ib_conn->fmr.pool = NULL;
	if (ret != -ENOSYS) {
		iser_err("FMR allocation failed, err %d\n", ret);
		return ret;
	} else {
		iser_warn("FMRs are not supported, using unaligned mode\n");
		return 0;
	}
}

/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct iser_conn *ib_conn)
{
	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, ib_conn->fmr.pool);

	if (ib_conn->fmr.pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr.pool);

	ib_conn->fmr.pool = NULL;

	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;
}

static int
iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
			 bool pi_enable, struct fast_reg_descriptor *desc)
{
	int ret;

	desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
						      ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_frpl)) {
		ret = PTR_ERR(desc->data_frpl);
		iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
			 ret);
		return ret;
	}

	desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_mr)) {
		ret = PTR_ERR(desc->data_mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		goto fast_reg_mr_failure;
	}
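	/* the *_KEY_VALID bits mark memory keys that are currently valid, so
	 * that the registration path (in iser_memory.c) can invalidate a
	 * previously used MR before reusing it */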
	desc->reg_indicators |= ISER_DATA_KEY_VALID;

	if (pi_enable) {
		struct ib_mr_init_attr mr_init_attr = {0};
		struct iser_pi_context *pi_ctx = NULL;

		desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
		if (!desc->pi_ctx) {
			iser_err("Failed to allocate pi context\n");
			ret = -ENOMEM;
			goto pi_ctx_alloc_failure;
		}
		pi_ctx = desc->pi_ctx;

		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
						    ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(pi_ctx->prot_frpl)) {
			ret = PTR_ERR(pi_ctx->prot_frpl);
			iser_err("Failed to allocate prot frpl ret=%d\n",
				 ret);
			goto prot_frpl_failure;
		}

		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
						ISCSI_ISER_SG_TABLESIZE + 1);
		if (IS_ERR(pi_ctx->prot_mr)) {
			ret = PTR_ERR(pi_ctx->prot_mr);
			iser_err("Failed to allocate prot frmr ret=%d\n",
				 ret);
			goto prot_mr_failure;
		}
		desc->reg_indicators |= ISER_PROT_KEY_VALID;

		mr_init_attr.max_reg_descriptors = 2;
		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
		if (IS_ERR(pi_ctx->sig_mr)) {
			ret = PTR_ERR(pi_ctx->sig_mr);
			iser_err("Failed to allocate signature enabled mr err=%d\n",
				 ret);
			goto sig_mr_failure;
		}
		desc->reg_indicators |= ISER_SIG_KEY_VALID;
	}
	desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;

	iser_dbg("Create fr_desc %p page_list %p\n",
		 desc, desc->data_frpl->page_list);

	return 0;
sig_mr_failure:
	ib_dereg_mr(desc->pi_ctx->prot_mr);
prot_mr_failure:
	ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
prot_frpl_failure:
	kfree(desc->pi_ctx);
pi_ctx_alloc_failure:
	ib_dereg_mr(desc->data_mr);
fast_reg_mr_failure:
	ib_free_fast_reg_page_list(desc->data_frpl);

	return ret;
}

/**
 * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 * returns 0 on success, or errno code on failure
 */
int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device	*device = ib_conn->device;
	struct fast_reg_descriptor	*desc;
	int i, ret;

	INIT_LIST_HEAD(&ib_conn->fastreg.pool);
	ib_conn->fastreg.pool_size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			iser_err("Failed to allocate a new fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = iser_create_fastreg_desc(device->ib_device, device->pd,
					       ib_conn->pi_support, desc);
		if (ret) {
			iser_err("Failed to create fastreg descriptor err=%d\n",
				 ret);
			kfree(desc);
			goto err;
		}

		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
		ib_conn->fastreg.pool_size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}

/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct iser_conn *ib_conn)
{
	struct fast_reg_descriptor *desc, *tmp;
	int i = 0;

	if (list_empty(&ib_conn->fastreg.pool))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
		list_del(&desc->list);
		ib_free_fast_reg_page_list(desc->data_frpl);
		ib_dereg_mr(desc->data_mr);
		if (desc->pi_ctx) {
			ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
			ib_dereg_mr(desc->pi_ctx->prot_mr);
			ib_destroy_mr(desc->pi_ctx->sig_mr);
			kfree(desc->pi_ctx);
		}
		kfree(desc);
		++i;
	}

	if (i < ib_conn->fastreg.pool_size)
		iser_warn("pool still has %d regions registered\n",
			  ib_conn->fastreg.pool_size - i);
}

/**
 * iser_create_ib_conn_res - creates the Queue-Pair (QP)
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
{
	struct iser_device	*device;
	struct ib_qp_init_attr	init_attr;
	int			ret = -ENOMEM;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;

	memset(&init_attr, 0, sizeof init_attr);

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= device->tx_cq[min_index];
	init_attr.recv_cq	= device->rx_cq[min_index];
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;
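	/* with T10-PI each command also consumes send WRs for the extra
	 * registration/signature work requests, hence the deeper send queue */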
	if (ib_conn->pi_support) {
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS;
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
	} else {
		init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
	}

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;

out_err:
	iser_err("unable to alloc mem or create resource, err %d\n", ret);
	return ret;
}

/**
 * releases the QP objects, returns 0 on success,
 * -1 on failure
 */
static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
{
	int cq_index;
	BUG_ON(ib_conn == NULL);

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->qp);

	/* qp is created only once both addr & route are resolved */

	if (ib_conn->qp != NULL) {
		cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index;
		ib_conn->device->cq_active_qps[cq_index]--;

		rdma_destroy_qp(ib_conn->cma_id);
	}

	ib_conn->qp	  = NULL;

	return 0;
}

/**
 * based on the resolved device node GUID see if there is an already allocated
 * device for this device node. If there's no such, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign the IB device to the new iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}

	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

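/* atomically move the connection from state @comp to state @exch;
 * returns nonzero if the transition took place */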
static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
				     enum iser_ib_conn_state comp,
				     enum iser_ib_conn_state exch)
{
	int ret;

	spin_lock_bh(&ib_conn->lock);
	if ((ret = (ib_conn->state == comp)))
		ib_conn->state = exch;
	spin_unlock_bh(&ib_conn->lock);
	return ret;
}

void iser_release_work(struct work_struct *work)
{
	struct iser_conn *ib_conn;

	ib_conn = container_of(work, struct iser_conn, release_work);

	/* wait for .conn_stop callback */
	wait_for_completion(&ib_conn->stop_completion);

	/* wait for the qp's post send and post receive buffers to empty */
	wait_event_interruptible(ib_conn->wait,
				 ib_conn->state == ISER_CONN_DOWN);

	iser_conn_release(ib_conn);
}

/**
 * Frees all conn objects and deallocs conn descriptor
 */
void iser_conn_release(struct iser_conn *ib_conn)
{
	struct iser_device  *device = ib_conn->device;

	BUG_ON(ib_conn->state == ISER_CONN_UP);

	mutex_lock(&ig.connlist_mutex);
	list_del(&ib_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);
	iser_free_rx_descriptors(ib_conn);
	iser_free_ib_conn_res(ib_conn);
	ib_conn->device = NULL;
	/* on EVENT_ADDR_ERROR there's no device yet for this conn */
	if (device != NULL)
		iser_device_try_release(device);
	/* if cma handler context, the caller actually destroys the id */
	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}
	kfree(ib_conn);
}

/**
 * triggers the start of the disconnect procedures and waits for them to be done
 */
void iser_conn_terminate(struct iser_conn *ib_conn)
{
	int err = 0;

	/* change the ib conn state only if the conn is UP, however always call
	 * rdma_disconnect since this is the only way to cause the CMA to change
	 * the QP state to ERROR
	 */

	iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, ISER_CONN_TERMINATING);
	err = rdma_disconnect(ib_conn->cma_id);
	if (err)
		iser_err("Failed to disconnect, conn: 0x%p err %d\n",
			 ib_conn, err);
}

static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;

	ib_conn = (struct iser_conn *)cma_id->context;

	ib_conn->state = ISER_CONN_DOWN;
	wake_up_interruptible(&ib_conn->wait);
}

static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn   *ib_conn;
	int    ret;

	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->dev_attr.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}

static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int    ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *ib_conn = (struct iser_conn *)cma_id->context;
	struct iser_device *device = ib_conn->device;

	ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

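	/* the CM private data carries an iSER header telling the target which
	 * features this initiator does not support (zero-based VA and
	 * send-with-invalidate) */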
	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
			ISER_SEND_W_INV_NOT_SUPPORTED);
	conn_param.private_data		= (void *)&req_hdr;
	conn_param.private_data_len	= sizeof(struct iser_cm_hdr);

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}

static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);

	ib_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_PENDING, ISER_CONN_UP))
		wake_up_interruptible(&ib_conn->wait);
}

static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;

	ib_conn = (struct iser_conn *)cma_id->context;

	/* getting here when the state is UP means that the conn is being *
	 * terminated asynchronously from the iSCSI layer's perspective.  */
	if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
					ISER_CONN_TERMINATING)) {
		if (ib_conn->iscsi_conn)
			iscsi_conn_failure(ib_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}

	/* Complete the termination process if no posts are pending */
	if (ib_conn->post_recv_buf_count == 0 &&
759 760 761 762 763 764 765 766
	    (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
		ib_conn->state = ISER_CONN_DOWN;
		wake_up_interruptible(&ib_conn->wait);
	}
}

static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	iser_info("event %d status %d conn %p id %p\n",
		  event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_disconnected_handler(cma_id);
		break;
	default:
		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
		break;
	}
	return 0;
}

void iser_conn_init(struct iser_conn *ib_conn)
{
	ib_conn->state = ISER_CONN_INIT;
	init_waitqueue_head(&ib_conn->wait);
	ib_conn->post_recv_buf_count = 0;
	atomic_set(&ib_conn->post_send_buf_count, 0);
	init_completion(&ib_conn->stop_completion);
	INIT_LIST_HEAD(&ib_conn->conn_list);
	spin_lock_init(&ib_conn->lock);
}

/**
 * starts the process of connecting to the target
 * sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn   *ib_conn,
		 struct sockaddr    *src_addr,
		 struct sockaddr    *dst_addr,
		 int                 non_blocking)
{
	int err = 0;

	sprintf(ib_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", ib_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	ib_conn->state = ISER_CONN_PENDING;

	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					     (void *)ib_conn,
					     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_event_interruptible(ib_conn->wait,
					 (ib_conn->state != ISER_CONN_PENDING));

		if (ib_conn->state != ISER_CONN_UP) {
			err =  -EIO;
			goto connect_failure;
		}
	}

	mutex_lock(&ig.connlist_mutex);
	list_add(&ib_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	ib_conn->state = ISER_CONN_DOWN;
connect_failure:
	iser_conn_release(ib_conn);
	return err;
}

/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct iser_conn     *ib_conn,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg  *mem_reg)
{
	struct ib_pool_fmr *mem;
	u64		   io_addr;
	u64		   *page_list;
	int		   status;

	page_list = page_vec->pages;
	io_addr	  = page_list[0];

	mem  = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
				    page_list,
				    page_vec->length,
				    io_addr);

	if (IS_ERR(mem)) {
		status = (int)PTR_ERR(mem);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
		return status;
	}

	mem_reg->lkey  = mem->fmr->lkey;
	mem_reg->rkey  = mem->fmr->rkey;
	mem_reg->len   = page_vec->length * SIZE_4K;
	mem_reg->va    = io_addr;
	mem_reg->is_mr = 1;
	mem_reg->mem_h = (void *)mem;

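	/* the first SG element may start mid-page: shift the I/O virtual
	 * address by the intra-page offset and report the exact data size */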
	mem_reg->va   += page_vec->offset;
	mem_reg->len   = page_vec->data_size;

	iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
		 "entry[0]: (0x%08lx,%ld)] -> "
		 "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
		 page_vec, page_vec->length,
		 (unsigned long)page_vec->pages[0],
		 (unsigned long)page_vec->data_size,
		 (unsigned int)mem_reg->lkey, mem_reg->mem_h,
		 (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
	return 0;
}

/**
 * Unregister (previously registered using FMR) memory.
 * If memory is non-FMR does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	int ret;

	if (!reg->is_mr)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	struct iser_conn *ib_conn = iser_task->ib_conn;
	struct fast_reg_descriptor *desc = reg->mem_h;

	if (!reg->is_mr)
		return;

	reg->mem_h = NULL;
	reg->is_mr = 0;
	spin_lock_bh(&ib_conn->lock);
	list_add_tail(&desc->list, &ib_conn->fastreg.pool);
	spin_unlock_bh(&ib_conn->lock);
}

int iser_post_recvl(struct iser_conn *ib_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_sge	  sge;
	int ib_ret;

	sge.addr   = ib_conn->login_resp_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey   = ib_conn->device->mr->lkey;

	rx_wr.wr_id   = (unsigned long)ib_conn->login_resp_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret	= ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}

int iser_post_recvm(struct iser_conn *ib_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	unsigned int my_rx_head = ib_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &ib_conn->rx_descs[my_rx_head];
		rx_wr->wr_id	= (unsigned long)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
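		/* the rx ring size is a power of two, so the mask implements
		 * cheap wraparound of the head index */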
		my_rx_head = (my_rx_head + 1) & ib_conn->qp_max_recv_dtos_mask;
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret	= ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		ib_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}


/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, -1 on failure
 */
int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
	int		  ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (unsigned long)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&ib_conn->post_send_buf_count);

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);
		atomic_dec(&ib_conn->post_send_buf_count);
	}
	return ib_ret;
}

static void iser_handle_comp_error(struct iser_tx_desc *desc,
				struct iser_conn *ib_conn)
{
	if (desc && desc->type == ISCSI_TX_DATAOUT)
		kmem_cache_free(ig.desc_cache, desc);

	if (ib_conn->post_recv_buf_count == 0 &&
	    atomic_read(&ib_conn->post_send_buf_count) == 0) {
		/* getting here when the state is UP means that the conn is *
		 * being terminated asynchronously from the iSCSI layer's   *
		 * perspective.                                             */
		if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
		    ISER_CONN_TERMINATING))
			iscsi_conn_failure(ib_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

		/* no more non completed posts to the QP, complete the
		 * termination process without worrying about the disconnect event */
		ib_conn->state = ISER_CONN_DOWN;
		wake_up_interruptible(&ib_conn->wait);
	}
}

static int iser_drain_tx_cq(struct iser_device  *device, int cq_index)
{
	struct ib_cq  *cq = device->tx_cq[cq_index];
	struct ib_wc  wc;
	struct iser_tx_desc *tx_desc;
	struct iser_conn *ib_conn;
	int completed_tx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
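		/* wr_id carries the tx descriptor pointer posted in
		 * iser_post_send, or ISER_FASTREG_LI_WRID for fastreg
		 * work requests that have no descriptor */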
		tx_desc	= (struct iser_tx_desc *) (unsigned long) wc.wr_id;
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_SEND)
				iser_snd_completion(tx_desc, ib_conn);
			else
				iser_err("expected opcode %d got %d\n",
					IB_WC_SEND, wc.opcode);
		} else {
			iser_err("tx id %llx status %d vend_err %x\n",
				 wc.wr_id, wc.status, wc.vendor_err);
			if (wc.wr_id != ISER_FASTREG_LI_WRID) {
				atomic_dec(&ib_conn->post_send_buf_count);
				iser_handle_comp_error(tx_desc, ib_conn);
			}
		}
		completed_tx++;
	}
	return completed_tx;
}


static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)data;
	struct iser_device  *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq	     *cq = device->rx_cq[cq_index];
	struct ib_wc	     wc;
	struct iser_rx_desc *desc;
	unsigned long	     xfer_len;
	struct iser_conn *ib_conn;
	int completed_tx, completed_rx = 0;

	/* First do tx drain, so in a case where we have rx flushes and a successful
	 * tx completion we will still go through completion error handling.
	 */
	completed_tx = iser_drain_tx_cq(device, cq_index);

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		desc	 = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
		BUG_ON(desc == NULL);
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_RECV) {
				xfer_len = (unsigned long)wc.byte_len;
				iser_rcv_completion(desc, xfer_len, ib_conn);
			} else
				iser_err("expected opcode %d got %d\n",
					IB_WC_RECV, wc.opcode);
		} else {
			if (wc.status != IB_WC_WR_FLUSH_ERR)
				iser_err("rx id %llx status %d vend_err %x\n",
					wc.wr_id, wc.status, wc.vendor_err);
			ib_conn->post_recv_buf_count--;
			iser_handle_comp_error(NULL, ib_conn);
		}
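		/* periodically drain the tx CQ so a long burst of rx
		 * completions cannot starve tx processing */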
		completed_rx++;
		if (!(completed_rx & 63))
			completed_tx += iser_drain_tx_cq(device, cq_index);
	}
	/* #warning "it is assumed here that arming CQ only once its empty" *
	 * " would not cause interrupts to be missed"                       */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
}

static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)cq_context;
	struct iser_device  *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	tasklet_schedule(&device->cq_tasklet[cq_index]);
}

u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	struct fast_reg_descriptor *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) {
		desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;

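			/* each sector on the wire is followed by an 8-byte
			 * T10-PI tuple, hence the sector_size + 8 divisor */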
			do_div(sector_off, sector_size + 8);
			*sector = scsi_get_lba(iser_task->sc) + sector_off;

			pr_err("PI error found type %d at sector %llx "
			       "expected %x vs actual %x\n",
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);

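			/* map the signature error type to the additional
			 * sense code qualifier reported to the SCSI midlayer
			 * (0x1 guard, 0x2 app tag, 0x3 ref tag check failed) */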
			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}