/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
				 ISCSI_ISER_MAX_CONN)

static int iser_cq_poll_limit = 512;

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got cq event %d\n", cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got qp event %d\n", cause->event);
}

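/**
 * iser_event_handler - logs async events reported on the ib device
 */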
static void iser_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	iser_err("async event %d on device %s port %d\n", event->event,
		event->device->name, event->element.port_num);
}

/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct ib_device_attr *dev_attr = &device->dev_attr;
	int ret, i, max_cqe;

	ret = ib_query_device(device->ib_device, dev_attr);
	if (ret) {
		pr_warn("Query device failed for %s\n", device->ib_device->name);
		return ret;
	}

	/* Assign function handles  - based on FMR support */
	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
		device->iser_free_rdma_reg_res = iser_free_fmr_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
	} else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool;
		device->iser_free_rdma_reg_res = iser_free_fastreg_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg;
	} else {
		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
		return -1;
	}

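	/* use one completion context per completion vector, but no more
	 * than the number of online cpus */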
	device->comps_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);

	device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
				GFP_KERNEL);
	if (!device->comps)
		goto comps_err;

	max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);

	iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
		  device->comps_used, device->ib_device->name,
		  device->ib_device->num_comp_vectors, max_cqe);

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		comp->device = device;
		comp->cq = ib_create_cq(device->ib_device,
					iser_cq_callback,
					iser_cq_event_callback,
					(void *)comp,
					max_cqe, i);
		if (IS_ERR(comp->cq)) {
			comp->cq = NULL;
			goto cq_err;
		}

		if (ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP))
			goto cq_err;

		tasklet_init(&comp->tasklet, iser_cq_tasklet_fn,
			     (unsigned long)comp);
	}

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
				iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	for (i = 0; i < device->comps_used; i++)
		tasklet_kill(&device->comps[i].tasklet);
cq_err:
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		if (comp->cq)
			ib_destroy_cq(comp->cq);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	kfree(device->comps);
comps_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;
	BUG_ON(device->mr == NULL);

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		tasklet_kill(&comp->tasklet);
		ib_destroy_cq(comp->cq);
		comp->cq = NULL;
	}

	(void)ib_unregister_event_handler(&device->event_handler);
	(void)ib_dereg_mr(device->mr);
	(void)ib_dealloc_pd(device->pd);

	kfree(device->comps);
	device->comps = NULL;

	device->mr = NULL;
	device->pd = NULL;
}

/**
 * iser_create_fmr_pool - Creates FMR pool and page_vector
 *
 * returns 0 on success, or errno code on failure
 */
int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct ib_fmr_pool_param params;
	int ret = -ENOMEM;

	ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
					(sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
					GFP_KERNEL);
	if (!ib_conn->fmr.page_vec)
		return ret;

	ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);

	params.page_shift        = SHIFT_4K;
	/* when the first/last SG element are not start/end *
	 * page aligned, the map would be of N+1 pages      */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50%  */
	params.pool_size	 = cmds_max * 2;
	params.dirty_watermark	 = cmds_max;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
	if (!IS_ERR(ib_conn->fmr.pool))
		return 0;

	/* no FMR => no need for page_vec */
	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;

	ret = PTR_ERR(ib_conn->fmr.pool);
	ib_conn->fmr.pool = NULL;
	if (ret != -ENOSYS) {
		iser_err("FMR allocation failed, err %d\n", ret);
		return ret;
	} else {
		iser_warn("FMRs are not supported, using unaligned mode\n");
		return 0;
	}
}

/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, ib_conn->fmr.pool);

	if (ib_conn->fmr.pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr.pool);

	ib_conn->fmr.pool = NULL;

	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;
}

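/**
 * iser_create_fastreg_desc - allocates a single fast registration
 * descriptor: data page-list and fast_reg mr, and when pi is enabled
 * also the protection page-list/mr and a signature enabled mr.
 */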
static int
iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
			 bool pi_enable, struct fast_reg_descriptor *desc)
{
	int ret;

	desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
						      ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_frpl)) {
		ret = PTR_ERR(desc->data_frpl);
		iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
			 ret);
		return ret;
	}

	desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_mr)) {
		ret = PTR_ERR(desc->data_mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		goto fast_reg_mr_failure;
	}
	desc->reg_indicators |= ISER_DATA_KEY_VALID;

	if (pi_enable) {
		struct ib_mr_init_attr mr_init_attr = {0};
		struct iser_pi_context *pi_ctx = NULL;

		desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
		if (!desc->pi_ctx) {
			iser_err("Failed to allocate pi context\n");
			ret = -ENOMEM;
			goto pi_ctx_alloc_failure;
		}
		pi_ctx = desc->pi_ctx;

		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
						    ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(pi_ctx->prot_frpl)) {
			ret = PTR_ERR(pi_ctx->prot_frpl);
			iser_err("Failed to allocate prot frpl ret=%d\n",
				 ret);
			goto prot_frpl_failure;
		}

		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
						ISCSI_ISER_SG_TABLESIZE + 1);
		if (IS_ERR(pi_ctx->prot_mr)) {
			ret = PTR_ERR(pi_ctx->prot_mr);
			iser_err("Failed to allocate prot frmr ret=%d\n",
				 ret);
			goto prot_mr_failure;
		}
		desc->reg_indicators |= ISER_PROT_KEY_VALID;

		mr_init_attr.max_reg_descriptors = 2;
		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
		if (IS_ERR(pi_ctx->sig_mr)) {
			ret = PTR_ERR(pi_ctx->sig_mr);
			iser_err("Failed to allocate signature enabled mr err=%d\n",
				 ret);
			goto sig_mr_failure;
		}
		desc->reg_indicators |= ISER_SIG_KEY_VALID;
	}
	desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;

	iser_dbg("Create fr_desc %p page_list %p\n",
		 desc, desc->data_frpl->page_list);

	return 0;
sig_mr_failure:
	ib_dereg_mr(desc->pi_ctx->prot_mr);
prot_mr_failure:
	ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
prot_frpl_failure:
	kfree(desc->pi_ctx);
pi_ctx_alloc_failure:
	ib_dereg_mr(desc->data_mr);
fast_reg_mr_failure:
	ib_free_fast_reg_page_list(desc->data_frpl);

	return ret;
}

/**
 * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 * returns 0 on success, or errno code on failure
 */
int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct fast_reg_descriptor *desc;
	int i, ret;

	INIT_LIST_HEAD(&ib_conn->fastreg.pool);
	ib_conn->fastreg.pool_size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			iser_err("Failed to allocate a new fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = iser_create_fastreg_desc(device->ib_device, device->pd,
					       ib_conn->pi_support, desc);
		if (ret) {
			iser_err("Failed to create fastreg descriptor err=%d\n",
				 ret);
			kfree(desc);
			goto err;
		}

		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
		ib_conn->fastreg.pool_size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}

/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct fast_reg_descriptor *desc, *tmp;
	int i = 0;


	if (list_empty(&ib_conn->fastreg.pool))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
		list_del(&desc->list);
		ib_free_fast_reg_page_list(desc->data_frpl);
		ib_dereg_mr(desc->data_mr);
		if (desc->pi_ctx) {
			ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
			ib_dereg_mr(desc->pi_ctx->prot_mr);
			ib_destroy_mr(desc->pi_ctx->sig_mr);
			kfree(desc->pi_ctx);
		}
		kfree(desc);
		++i;
	}

	if (i < ib_conn->fastreg.pool_size)
		iser_warn("pool still has %d regions registered\n",
			  ib_conn->fastreg.pool_size - i);
}

/**
 * iser_create_ib_conn_res - creates the Queue-Pair (QP)
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);
	struct iser_device	*device;
	struct ib_device_attr *dev_attr;
	struct ib_qp_init_attr	init_attr;
	int			ret = -ENOMEM;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;
	dev_attr = &device->dev_attr;

	memset(&init_attr, 0, sizeof init_attr);

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->comps_used; index++) {
		if (device->comps[index].active_qps <
		    device->comps[min_index].active_qps)
			min_index = index;
	}
	ib_conn->comp = &device->comps[min_index];
	ib_conn->comp->active_qps++;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= ib_conn->comp->cq;
	init_attr.recv_cq	= ib_conn->comp->cq;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;
	if (ib_conn->pi_support) {
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
		iser_conn->max_cmds =
			ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
	} else {
		if (dev_attr->max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
			init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS + 1;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
		} else {
			init_attr.cap.max_send_wr = dev_attr->max_qp_wr;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(dev_attr->max_qp_wr);
			iser_dbg("device %s supports max_send_wr %d\n",
				 device->ib_device->name, dev_attr->max_qp_wr);
		}
	}

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;

out_err:
	mutex_lock(&ig.connlist_mutex);
	ib_conn->comp->active_qps--;
	mutex_unlock(&ig.connlist_mutex);
	iser_err("unable to alloc mem or create resource, err %d\n", ret);

	return ret;
}

/**
 * based on the resolved device node GUID see if there is an already
 * allocated device for this device. If there's no such, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign the IB device to the new iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

/**
 * Called with state mutex held
 **/
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
{
	int ret;

	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;

	return ret;
}

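/**
 * iser_release_work - waits for the stop and IB cleanup stages to
 * complete, moves the connection to DOWN and releases it.
 */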
void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;

	iser_conn = container_of(work, struct iser_conn, release_work);

	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resources cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);

	iser_conn_release(iser_conn);
}

/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
603 604 605
 * @destroy_device: indicator if we need to try to release
 *     the iser device (only iscsi shutdown and DEVICE_REMOVAL
 *     will use this.
606 607 608 609 610
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is Safe to
 * be invoked multiple times.
 */
611 612
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
				  bool destroy_device)
613 614 615 616 617 618 619 620 621 622
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	iser_free_rx_descriptors(iser_conn);

	if (ib_conn->qp != NULL) {
		ib_conn->comp->active_qps--;
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}

	if (destroy_device && device != NULL) {
		iser_device_try_release(device);
		ib_conn->device = NULL;
	}
}

/**
 * Frees all conn objects and deallocates the conn descriptor
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	mutex_lock(&iser_conn->state_mutex);
	if (iser_conn->state != ISER_CONN_DOWN) {
		iser_warn("iser conn %p state %d, expected state down.\n",
			  iser_conn, iser_conn->state);
		iser_conn->state = ISER_CONN_DOWN;
	}
	/*
	 * In case we never got to bind stage, we still need to
	 * release IB resources (which is safe to call more than once).
	 */
	iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}

	kfree(iser_conn);
}

/**
 * triggers start of the disconnect procedures and waits for them to be done
 * Called with state mutex held
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_send_wr *bad_wr;
	int err = 0;

	/* terminate the iser conn only if the conn state is UP */
	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				       ISER_CONN_TERMINATING))
		return 0;

	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);

	/*
	 * In case we didn't already clean up the cma_id (peer initiated
	 * a disconnection), we need to cause the CMA to change the QP
	 * state to ERROR.
	 */
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);

		/* post an indication that all flush errors were consumed */
		err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr);
		if (err) {
			iser_err("conn %p failed to post beacon\n", ib_conn);
			return 1;
		}

		wait_for_completion(&ib_conn->flush_comp);
	}

	return 1;
}

/**
 * Called with state mutex held
 **/
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn->state = ISER_CONN_DOWN;
}

/**
 * Called with state mutex held
 **/
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn   *iser_conn;
	struct ib_conn   *ib_conn;
	int    ret;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->dev_attr.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}

/**
 * Called with state mutex held
 **/
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int    ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
			ISER_SEND_W_INV_NOT_SUPPORTED);
	conn_param.private_data		= (void *)&req_hdr;
	conn_param.private_data_len	= sizeof(struct iser_cm_hdr);

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}

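/**
 * Called with state mutex held
 **/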
static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);

	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
}

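/**
 * iser_disconnected_handler - starts connection termination and, when
 * the connection is bound, notifies the iscsi layer of the failure.
 */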
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
}

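/**
 * iser_cleanup_handler - makes sure disconnect handling was done,
 * releases the connection IB resources and signals ib_completion.
 */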
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy_device)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	/*
	 * We are not guaranteed that we visited disconnected_handler
	 * by now, call it here to be safe that we handle CM drep
	 * and flush errors.
	 */
	iser_disconnected_handler(cma_id);
	iser_free_ib_conn_res(iser_conn, destroy_device);
	complete(&iser_conn->ib_completion);
}

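/**
 * iser_cma_handler - dispatches RDMA CM events, serialized under the
 * connection state mutex.
 */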
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_info("event %d status %d conn %p id %p\n",
		  event->event, event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * we *must* destroy the device as we cannot rely
		 * on iscsid to be around to initiate error handling.
		 * also if we are not in state DOWN implicitly destroy
		 * the cma_id.
		 */
		iser_cleanup_handler(cma_id, true);
		if (iser_conn->state != ISER_CONN_DOWN) {
			iser_conn->ib_conn.cma_id = NULL;
			ret = 1;
		}
		break;
	default:
		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);

	return ret;
}

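/**
 * iser_conn_init - one-time initialization of the connection state,
 * completion objects, locks and the connection list entry.
 */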
void iser_conn_init(struct iser_conn *iser_conn)
{
	iser_conn->state = ISER_CONN_INIT;
	iser_conn->ib_conn.post_recv_buf_count = 0;
	init_completion(&iser_conn->ib_conn.flush_comp);
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->ib_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	spin_lock_init(&iser_conn->ib_conn.lock);
	mutex_init(&iser_conn->state_mutex);
}

/**
 * starts the process of connecting to the target
 * sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn   *iser_conn,
		 struct sockaddr    *src_addr,
		 struct sockaddr    *dst_addr,
		 int                 non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	mutex_lock(&iser_conn->state_mutex);

	sprintf(iser_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", iser_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_conn->state = ISER_CONN_PENDING;

	ib_conn->beacon.wr_id = ISER_BEACON_WRID;
	ib_conn->beacon.opcode = IB_WR_SEND;

	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);

		if (iser_conn->state != ISER_CONN_UP) {
			err =  -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);

	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}

/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct ib_conn *ib_conn,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg  *mem_reg)
{
	struct ib_pool_fmr *mem;
	u64		   io_addr;
	u64		   *page_list;
	int		   status;

	page_list = page_vec->pages;
	io_addr	  = page_list[0];

	mem  = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
				    page_list,
				    page_vec->length,
				    io_addr);

	if (IS_ERR(mem)) {
		status = (int)PTR_ERR(mem);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
		return status;
	}

	mem_reg->lkey  = mem->fmr->lkey;
	mem_reg->rkey  = mem->fmr->rkey;
	mem_reg->len   = page_vec->length * SIZE_4K;
	mem_reg->va    = io_addr;
	mem_reg->mem_h = (void *)mem;

	mem_reg->va   += page_vec->offset;
	mem_reg->len   = page_vec->data_size;

	iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
		 "entry[0]: (0x%08lx,%ld)] -> "
		 "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
		 page_vec, page_vec->length,
		 (unsigned long)page_vec->pages[0],
		 (unsigned long)page_vec->data_size,
		 (unsigned int)mem_reg->lkey, mem_reg->mem_h,
		 (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
	return 0;
}

/**
 * Unregister (previously registered using FMR) memory.
 * If memory is non-FMR does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	int ret;

	if (!reg->mem_h)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

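/**
 * Unregister (previously registered using fastreg) memory by
 * returning the descriptor to the connection free pool.
 */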
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	struct iser_conn *iser_conn = iser_task->iser_conn;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct fast_reg_descriptor *desc = reg->mem_h;

	if (!desc)
		return;

	reg->mem_h = NULL;
	spin_lock_bh(&ib_conn->lock);
	list_add_tail(&desc->list, &ib_conn->fastreg.pool);
	spin_unlock_bh(&ib_conn->lock);
}

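/**
 * iser_post_recvl - posts a single receive buffer for the login
 * response on the connection QP.
 */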
int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_sge	  sge;
	int ib_ret;

	sge.addr   = iser_conn->login_resp_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey   = ib_conn->device->mr->lkey;

	rx_wr.wr_id   = (uintptr_t)iser_conn->login_resp_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret	= ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}

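/**
 * iser_post_recvm - posts a chain of count receive buffers as a
 * single work request list on the connection QP.
 */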
int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	unsigned int my_rx_head = iser_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &iser_conn->rx_descs[my_rx_head];
		rx_wr->wr_id	= (uintptr_t)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
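		/* rx ring size is a power of two, the mask wraps the head */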
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret	= ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		iser_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}


/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, -1 on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		   bool signal)
{
	int		  ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (uintptr_t)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = signal ? IB_SEND_SIGNALED : 0;

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret)
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);

	return ib_ret;
}

/**
 * is_iser_tx_desc - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @iser_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_iser_tx_desc(struct iser_conn *iser_conn, void *wr_id)
{
	void *start = iser_conn->rx_descs;
	int len = iser_conn->num_rx_descs * sizeof(*iser_conn->rx_descs);

	if (wr_id >= start && wr_id < start + len)
		return false;

	return true;
}

/**
 * iser_handle_comp_error() - Handle error completion
 * @ib_conn:   connection RDMA resources
 * @wc:        work completion
 *
 * Notes: We may handle a FLUSH error completion and in this case
 *        we only cleanup in case TX type was DATAOUT. For non-FLUSH
 *        error completion we should also notify iscsi layer that
 *        connection is failed (in case we passed bind stage).
 */
static void
iser_handle_comp_error(struct ib_conn *ib_conn,
		       struct ib_wc *wc)
{
	void *wr_id = (void *)(uintptr_t)wc->wr_id;
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);

	if (wc->status != IB_WC_WR_FLUSH_ERR)
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

	if (is_iser_tx_desc(iser_conn, wr_id)) {
		struct iser_tx_desc *desc = wr_id;

		if (desc->type == ISCSI_TX_DATAOUT)
			kmem_cache_free(ig.desc_cache, desc);
	} else {
		ib_conn->post_recv_buf_count--;
	}
}

/**
 * iser_handle_wc - handle a single work completion
 * @wc: work completion
 *
 * Soft-IRQ context, work completion can be either
 * SEND or RECV, and can turn out successful or
 * with error (or flush error).
 */
static void iser_handle_wc(struct ib_wc *wc)
{
	struct ib_conn *ib_conn;
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;

	ib_conn = wc->qp->qp_context;
	if (likely(wc->status == IB_WC_SUCCESS)) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
			iser_rcv_completion(rx_desc, wc->byte_len,
					    ib_conn);
		} else if (wc->opcode == IB_WC_SEND) {
			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
			iser_snd_completion(tx_desc, ib_conn);
		} else {
			iser_err("Unknown wc opcode %d\n", wc->opcode);
		}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iser_err("wr id %llx status %d vend_err %x\n",
				 wc->wr_id, wc->status, wc->vendor_err);
		else
			iser_dbg("flush error: wr id %llx\n", wc->wr_id);

		if (wc->wr_id != ISER_FASTREG_LI_WRID &&
		    wc->wr_id != ISER_BEACON_WRID)
			iser_handle_comp_error(ib_conn, wc);

		/* complete in case all flush errors were consumed */
		if (wc->wr_id == ISER_BEACON_WRID)
			complete(&ib_conn->flush_comp);
	}
}

/**
 * iser_cq_tasklet_fn - iSER completion polling loop
 * @data: iSER completion context
 *
 * Soft-IRQ context, polling connection CQ until
 * either CQ was empty or we exhausted polling budget
 */
static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_comp *comp = (struct iser_comp *)data;
	struct ib_cq *cq = comp->cq;
	struct ib_wc *const wcs = comp->wcs;
	int i, n, completed = 0;

	while ((n = ib_poll_cq(cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
		for (i = 0; i < n; i++)
			iser_handle_wc(&wcs[i]);

		completed += n;
		if (completed >= iser_cq_poll_limit)
			break;
	}

	/*
	 * It is assumed here that arming CQ only once its empty
	 * would not cause interrupts to be missed.
	 */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	iser_dbg("got %d completions\n", completed);
}

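/**
 * iser_cq_callback - CQ interrupt handler, defers completion
 * processing to the per-CQ tasklet.
 */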
static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_comp *comp = cq_context;

	tasklet_schedule(&comp->tasklet);
}

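/**
 * iser_check_task_pi_status - checks the signature MR status of a
 * protected task and maps a detected signature error to the matching
 * guard/ref-tag/app-tag check failure code and failing sector.
 */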
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	struct fast_reg_descriptor *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) {
		desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;

			do_div(sector_off, sector_size + 8);
			*sector = scsi_get_lba(iser_task->sc) + sector_off;

			pr_err("PI error found type %d at sector %llx "
			       "expected %x vs actual %x\n",
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}