/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_CQ_LEN	(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got cq event %d\n", cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got qp event %d\n", cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	iser_err("async event %d on device %s port %d\n", event->event,
		event->device->name, event->element.port_num);
}

/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct iser_cq_desc *cq_desc;
	struct ib_device_attr *dev_attr = &device->dev_attr;
	int ret, i;

	ret = ib_query_device(device->ib_device, dev_attr);
	if (ret) {
		pr_warn("Query device failed for %s\n", device->ib_device->name);
		return ret;
	}

	/* Assign function handles  - based on FMR support */
	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
		device->iser_free_rdma_reg_res = iser_free_fmr_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
	} else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool;
		device->iser_free_rdma_reg_res = iser_free_fastreg_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg;
	} else {
		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
		return -1;
	}

	device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
	iser_info("using %d CQs, device %s supports %d vectors\n",
		  device->cqs_used, device->ib_device->name,
		  device->ib_device->num_comp_vectors);

	device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
				  GFP_KERNEL);
	if (device->cq_desc == NULL)
		goto cq_desc_err;
	cq_desc = device->cq_desc;

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device   = device;
		cq_desc[i].cq_index = i;

		device->rx_cq[i] = ib_create_cq(device->ib_device,
					  iser_cq_callback,
					  iser_cq_event_callback,
					  (void *)&cq_desc[i],
					  ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(device->rx_cq[i])) {
			device->rx_cq[i] = NULL;
			goto cq_err;
		}

		device->tx_cq[i] = ib_create_cq(device->ib_device,
					  NULL, iser_cq_event_callback,
					  (void *)&cq_desc[i],
					  ISER_MAX_TX_CQ_LEN, i);

		if (IS_ERR(device->tx_cq[i])) {
			device->tx_cq[i] = NULL;
			goto cq_err;
		}

		if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
			goto cq_err;

		tasklet_init(&device->cq_tasklet[i],
			     iser_cq_tasklet_fn,
			(unsigned long)&cq_desc[i]);
	}

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
				iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	for (i = 0; i < device->cqs_used; i++)
		tasklet_kill(&device->cq_tasklet[i]);
cq_err:
	for (i = 0; i < device->cqs_used; i++) {
		if (device->tx_cq[i])
			ib_destroy_cq(device->tx_cq[i]);
		if (device->rx_cq[i])
			ib_destroy_cq(device->rx_cq[i]);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	kfree(device->cq_desc);
cq_desc_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;
	BUG_ON(device->mr == NULL);

	for (i = 0; i < device->cqs_used; i++) {
		tasklet_kill(&device->cq_tasklet[i]);
		(void)ib_destroy_cq(device->tx_cq[i]);
		(void)ib_destroy_cq(device->rx_cq[i]);
		device->tx_cq[i] = NULL;
		device->rx_cq[i] = NULL;
	}

	(void)ib_unregister_event_handler(&device->event_handler);
	(void)ib_dereg_mr(device->mr);
	(void)ib_dealloc_pd(device->pd);

	kfree(device->cq_desc);

	device->mr = NULL;
	device->pd = NULL;
}

/**
 * iser_create_fmr_pool - Creates FMR pool and page_vector
 *
 * returns 0 on success, or errno code on failure
 */
int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct ib_fmr_pool_param params;
	int ret = -ENOMEM;

	ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
					(sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
					GFP_KERNEL);
	if (!ib_conn->fmr.page_vec)
		return ret;

	ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);

	params.page_shift        = SHIFT_4K;
	/* when the first/last SG elements are not start/end *
	 * page aligned, the map would be of N+1 pages       */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50%  */
	params.pool_size	 = cmds_max * 2;
	params.dirty_watermark	 = cmds_max;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
	if (!IS_ERR(ib_conn->fmr.pool))
		return 0;

	/* no FMR => no need for page_vec */
	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;

	ret = PTR_ERR(ib_conn->fmr.pool);
	ib_conn->fmr.pool = NULL;
	if (ret != -ENOSYS) {
		iser_err("FMR allocation failed, err %d\n", ret);
		return ret;
	} else {
		iser_warn("FMRs are not supported, using unaligned mode\n");
		return 0;
	}
}

/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, ib_conn->fmr.pool);

	if (ib_conn->fmr.pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr.pool);

	ib_conn->fmr.pool = NULL;

	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;
}

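/**
 * iser_create_fastreg_desc - allocates a single fast registration
 * descriptor: a fast_reg page list and MR for the data and, when
 * pi_enable is set, a protection context with its own page list,
 * MR and signature-enabled MR used for T10-PI offload.
 */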
static int
iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
			 bool pi_enable, struct fast_reg_descriptor *desc)
{
	int ret;

	desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
						      ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_frpl)) {
		ret = PTR_ERR(desc->data_frpl);
		iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
			 ret);
		return ret;
	}

	desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_mr)) {
		ret = PTR_ERR(desc->data_mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		goto fast_reg_mr_failure;
	}
	desc->reg_indicators |= ISER_DATA_KEY_VALID;

	if (pi_enable) {
		struct ib_mr_init_attr mr_init_attr = {0};
		struct iser_pi_context *pi_ctx = NULL;

		desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
		if (!desc->pi_ctx) {
			iser_err("Failed to allocate pi context\n");
			ret = -ENOMEM;
			goto pi_ctx_alloc_failure;
		}
		pi_ctx = desc->pi_ctx;

		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
						    ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(pi_ctx->prot_frpl)) {
			ret = PTR_ERR(pi_ctx->prot_frpl);
			iser_err("Failed to allocate prot frpl ret=%d\n",
				 ret);
			goto prot_frpl_failure;
		}

		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
						ISCSI_ISER_SG_TABLESIZE + 1);
		if (IS_ERR(pi_ctx->prot_mr)) {
			ret = PTR_ERR(pi_ctx->prot_mr);
			iser_err("Failed to allocate prot frmr ret=%d\n",
				 ret);
			goto prot_mr_failure;
		}
		desc->reg_indicators |= ISER_PROT_KEY_VALID;

		mr_init_attr.max_reg_descriptors = 2;
		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
		if (IS_ERR(pi_ctx->sig_mr)) {
			ret = PTR_ERR(pi_ctx->sig_mr);
			iser_err("Failed to allocate signature enabled mr err=%d\n",
				 ret);
			goto sig_mr_failure;
		}
		desc->reg_indicators |= ISER_SIG_KEY_VALID;
	}
	desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;

	iser_dbg("Create fr_desc %p page_list %p\n",
		 desc, desc->data_frpl->page_list);

	return 0;
sig_mr_failure:
	ib_dereg_mr(desc->pi_ctx->prot_mr);
prot_mr_failure:
	ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
prot_frpl_failure:
	kfree(desc->pi_ctx);
pi_ctx_alloc_failure:
	ib_dereg_mr(desc->data_mr);
fast_reg_mr_failure:
	ib_free_fast_reg_page_list(desc->data_frpl);

	return ret;
}

/**
 * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 * returns 0 on success, or errno code on failure
 */
int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct fast_reg_descriptor *desc;
	int i, ret;

	INIT_LIST_HEAD(&ib_conn->fastreg.pool);
	ib_conn->fastreg.pool_size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			iser_err("Failed to allocate a new fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = iser_create_fastreg_desc(device->ib_device, device->pd,
					       ib_conn->pi_support, desc);
		if (ret) {
			iser_err("Failed to create fastreg descriptor err=%d\n",
				 ret);
			kfree(desc);
			goto err;
		}

		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
		ib_conn->fastreg.pool_size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}

/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct fast_reg_descriptor *desc, *tmp;
	int i = 0;

	if (list_empty(&ib_conn->fastreg.pool))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
		list_del(&desc->list);
		ib_free_fast_reg_page_list(desc->data_frpl);
		ib_dereg_mr(desc->data_mr);
		if (desc->pi_ctx) {
			ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
			ib_dereg_mr(desc->pi_ctx->prot_mr);
			ib_destroy_mr(desc->pi_ctx->sig_mr);
			kfree(desc->pi_ctx);
		}
		kfree(desc);
		++i;
	}

	if (i < ib_conn->fastreg.pool_size)
		iser_warn("pool still has %d regions registered\n",
			  ib_conn->fastreg.pool_size - i);
}

/**
 * iser_create_ib_conn_res - creates the connection Queue-Pair (QP)
 *
 * returns 0 on success, errno code on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_device	*device;
	struct ib_qp_init_attr	init_attr;
	int			ret = -ENOMEM;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;

	memset(&init_attr, 0, sizeof init_attr);

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	ib_conn->cq_index = min_index;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= device->tx_cq[min_index];
	init_attr.recv_cq	= device->rx_cq[min_index];
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;
	if (ib_conn->pi_support) {
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS;
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
	} else {
		init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
	}

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;

out_err:
	iser_err("unable to alloc mem or create resource, err %d\n", ret);
	return ret;
}

/**
 * based on the resolved device node GUID see if there is an already
 * allocated device. If there's no such device, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign the IB device to this iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

/**
 * Called with state mutex held
 **/
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
{
	int ret;

	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;

	return ret;
}

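/**
 * iser_release_work - deferred connection teardown: waits for the
 * iSCSI conn_stop callback and for all posted buffers to be flushed,
 * moves the connection to ISER_CONN_DOWN and releases it.
 */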
void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;
	int rc;

	iser_conn = container_of(work, struct iser_conn, release_work);

	/* wait for .conn_stop callback */
	rc = wait_for_completion_timeout(&iser_conn->stop_completion, 30 * HZ);
	WARN_ON(rc == 0);

	/* wait for the qp's post send and post receive buffers to empty */
	rc = wait_for_completion_timeout(&iser_conn->flush_completion, 30 * HZ);
	WARN_ON(rc == 0);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);

	iser_conn_release(iser_conn);
}

/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	iser_free_rx_descriptors(iser_conn);

	if (ib_conn->qp != NULL) {
		ib_conn->device->cq_active_qps[ib_conn->cq_index]--;
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}

	if (device != NULL) {
		iser_device_try_release(device);
		ib_conn->device = NULL;
	}
}

/**
 * Frees all conn objects and deallocs conn descriptor
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	mutex_lock(&iser_conn->state_mutex);
	BUG_ON(iser_conn->state != ISER_CONN_DOWN);
	iser_free_ib_conn_res(iser_conn);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}

	kfree(iser_conn);
}

/**
 * triggers start of the disconnect procedures and waits for them to be done
 */
void iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	/* change the ib conn state only if the conn is UP, however always call
	 * rdma_disconnect since this is the only way to cause the CMA to change
	 * the QP state to ERROR
	 */

	iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP, ISER_CONN_TERMINATING);
	err = rdma_disconnect(ib_conn->cma_id);
	if (err)
		iser_err("Failed to disconnect, conn: 0x%p err %d\n",
			 iser_conn, err);
}

/**
 * Called with state mutex held
 **/
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn->state = ISER_CONN_DOWN;
}

/**
 * Called with state mutex held
 **/
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn   *iser_conn;
	struct ib_conn   *ib_conn;
	int    ret;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->dev_attr.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}

/**
 * Called with state mutex held
 **/
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int    ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
			ISER_SEND_W_INV_NOT_SUPPORTED);
	conn_param.private_data		= (void *)&req_hdr;
	conn_param.private_data_len	= sizeof(struct iser_cm_hdr);

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}

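/**
 * Called with state mutex held
 **/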
static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);

	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
}

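/**
 * Called with state mutex held
 **/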
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	/* getting here when the state is UP means that the conn is being *
	 * terminated asynchronously from the iSCSI layer's perspective.  */
	if (iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				      ISER_CONN_TERMINATING)){
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}

	/* Complete the termination process if no posts are pending. This code
	 * block also exists in iser_handle_comp_error(), but it is needed here
	 * for cases of no flushes at all, e.g. discovery over rdma.
	 */
	if (ib_conn->post_recv_buf_count == 0 &&
	    (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
		complete(&iser_conn->flush_completion);
	}
}

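/**
 * iser_cma_handler - dispatches RDMA CM events to the handlers above,
 * serialized per connection by the state mutex.
 */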
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_info("event %d status %d conn %p id %p\n",
		  event->event, event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_disconnected_handler(cma_id);
		break;
	default:
		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);
	return 0;
}

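/**
 * iser_conn_init - initializes the connection state, completion objects,
 * post counters and locks before iser_connect() is called.
 */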
void iser_conn_init(struct iser_conn *iser_conn)
{
	iser_conn->state = ISER_CONN_INIT;
	iser_conn->ib_conn.post_recv_buf_count = 0;
	atomic_set(&iser_conn->ib_conn.post_send_buf_count, 0);
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->flush_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	spin_lock_init(&iser_conn->ib_conn.lock);
	mutex_init(&iser_conn->state_mutex);
}

/**
 * starts the process of connecting to the target
 * sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn   *iser_conn,
		 struct sockaddr    *src_addr,
		 struct sockaddr    *dst_addr,
		 int                 non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	mutex_lock(&iser_conn->state_mutex);

	sprintf(iser_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", iser_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_conn->state = ISER_CONN_PENDING;

	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);

		if (iser_conn->state != ISER_CONN_UP) {
			err =  -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);

	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}

/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct ib_conn *ib_conn,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg  *mem_reg)
{
	struct ib_pool_fmr *mem;
	u64		   io_addr;
	u64		   *page_list;
	int		   status;

	page_list = page_vec->pages;
	io_addr	  = page_list[0];

	mem  = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
				    page_list,
				    page_vec->length,
				    io_addr);

	if (IS_ERR(mem)) {
		status = (int)PTR_ERR(mem);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
		return status;
	}

	mem_reg->lkey  = mem->fmr->lkey;
	mem_reg->rkey  = mem->fmr->rkey;
	mem_reg->len   = page_vec->length * SIZE_4K;
	mem_reg->va    = io_addr;
	mem_reg->is_mr = 1;
	mem_reg->mem_h = (void *)mem;

	mem_reg->va   += page_vec->offset;
	mem_reg->len   = page_vec->data_size;

	iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
		 "entry[0]: (0x%08lx,%ld)] -> "
		 "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
		 page_vec, page_vec->length,
		 (unsigned long)page_vec->pages[0],
		 (unsigned long)page_vec->data_size,
		 (unsigned int)mem_reg->lkey, mem_reg->mem_h,
		 (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
	return 0;
}

/**
 * Unregister (previously registered using FMR) memory.
 * If memory is non-FMR does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	int ret;

	if (!reg->is_mr)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n",reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

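/**
 * Returns the fast_reg descriptor used by this task to the
 * connection's pool.
 */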
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	struct iser_conn *iser_conn = iser_task->iser_conn;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct fast_reg_descriptor *desc = reg->mem_h;

	if (!reg->is_mr)
		return;

	reg->mem_h = NULL;
	reg->is_mr = 0;
	spin_lock_bh(&ib_conn->lock);
	list_add_tail(&desc->list, &ib_conn->fastreg.pool);
	spin_unlock_bh(&ib_conn->lock);
}

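/**
 * iser_post_recvl - posts a single receive work request for the
 * login response buffer.
 */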
int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_sge	  sge;
	int ib_ret;

	sge.addr   = iser_conn->login_resp_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey   = ib_conn->device->mr->lkey;

	rx_wr.wr_id   = (unsigned long)iser_conn->login_resp_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret	= ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}

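/**
 * iser_post_recvm - posts a chain of count receive work requests
 * starting at the current rx_desc_head.
 */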
int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	unsigned int my_rx_head = iser_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &iser_conn->rx_descs[my_rx_head];
		rx_wr->wr_id	= (unsigned long)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret	= ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		iser_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}


/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, errno code on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
	int		  ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (unsigned long)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&ib_conn->post_send_buf_count);

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);
		atomic_dec(&ib_conn->post_send_buf_count);
	}
	return ib_ret;
}

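/**
 * iser_handle_comp_error - releases a flushed dataout descriptor and,
 * once no posted buffers remain, fails the iSCSI connection and
 * completes the flush so termination can proceed.
 */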
static void iser_handle_comp_error(struct iser_tx_desc *desc,
				   struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);

	if (desc && desc->type == ISCSI_TX_DATAOUT)
		kmem_cache_free(ig.desc_cache, desc);

	if (ib_conn->post_recv_buf_count == 0 &&
	    atomic_read(&ib_conn->post_send_buf_count) == 0) {
		/**
		 * getting here when the state is UP means that the conn is
		 * being terminated asynchronously from the iSCSI layer's
		 * perspective. It is safe to peek at the connection state
		 * since iscsi_conn_failure is allowed to be called twice.
		 **/
		if (iser_conn->state == ISER_CONN_UP)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

		/* no more non completed posts to the QP, complete the
		 * termination process w.o worrying on disconnect event */
		complete(&iser_conn->flush_completion);
	}
}

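/* drain the TX CQ, returns the number of send completions handled */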
static int iser_drain_tx_cq(struct iser_device  *device, int cq_index)
{
	struct ib_cq  *cq = device->tx_cq[cq_index];
	struct ib_wc  wc;
	struct iser_tx_desc *tx_desc;
	struct ib_conn *ib_conn;
	int completed_tx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		tx_desc	= (struct iser_tx_desc *) (unsigned long) wc.wr_id;
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_SEND)
				iser_snd_completion(tx_desc, ib_conn);
			else
				iser_err("expected opcode %d got %d\n",
					IB_WC_SEND, wc.opcode);
		} else {
			iser_err("tx id %llx status %d vend_err %x\n",
				 wc.wr_id, wc.status, wc.vendor_err);
			if (wc.wr_id != ISER_FASTREG_LI_WRID) {
				atomic_dec(&ib_conn->post_send_buf_count);
				iser_handle_comp_error(tx_desc, ib_conn);
			}
		}
		completed_tx++;
	}
	return completed_tx;
}


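/**
 * Per-CQ tasklet: drains the TX CQ first, then polls RX completions,
 * and finally re-arms the CQ for the next interrupt.
 */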
static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)data;
	struct iser_device  *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq	     *cq = device->rx_cq[cq_index];
	struct ib_wc	     wc;
	struct iser_rx_desc *desc;
	unsigned long	     xfer_len;
	struct ib_conn *ib_conn;
	int completed_tx, completed_rx = 0;

	/* First do tx drain, so in a case where we have rx flushes and a successful
	 * tx completion we will still go through completion error handling.
	 */
	completed_tx = iser_drain_tx_cq(device, cq_index);

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		desc	 = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
		BUG_ON(desc == NULL);
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_RECV) {
				xfer_len = (unsigned long)wc.byte_len;
				iser_rcv_completion(desc, xfer_len, ib_conn);
			} else
				iser_err("expected opcode %d got %d\n",
					IB_WC_RECV, wc.opcode);
		} else {
			if (wc.status != IB_WC_WR_FLUSH_ERR)
				iser_err("rx id %llx status %d vend_err %x\n",
					wc.wr_id, wc.status, wc.vendor_err);
			ib_conn->post_recv_buf_count--;
			iser_handle_comp_error(NULL, ib_conn);
		}
		completed_rx++;
		if (!(completed_rx & 63))
			completed_tx += iser_drain_tx_cq(device, cq_index);
	}
	/* #warning "it is assumed here that arming CQ only once it's empty" *
	 * " would not cause interrupts to be missed"                        */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
}

static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)cq_context;
	struct iser_device  *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;

	tasklet_schedule(&device->cq_tasklet[cq_index]);
}

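/**
 * iser_check_task_pi_status - checks the signature MR status of a
 * protected task. Returns the DIF error type detected by the HCA
 * (0x1 guard, 0x2 app tag, 0x3 ref tag) and sets *sector to the
 * failing LBA, or returns 0 when no protection error was found.
 */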
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	struct fast_reg_descriptor *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) {
		desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;

			do_div(sector_off, sector_size + 8);
			*sector = scsi_get_lba(iser_task->sc) + sector_off;

			pr_err("PI error found type %d at sector %llx "
			       "expected %x vs actual %x\n",
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}