/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
				 ISCSI_ISER_MAX_CONN)

static int iser_cq_poll_limit = 512;

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got cq event %d\n", cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got qp event %d\n", cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	iser_err("async event %d on device %s port %d\n", event->event,
		event->device->name, event->element.port_num);
}

/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct ib_device_attr *dev_attr = &device->dev_attr;
	int ret, i, max_cqe;

	ret = ib_query_device(device->ib_device, dev_attr);
	if (ret) {
		pr_warn("Query device failed for %s\n", device->ib_device->name);
		return ret;
	}

	/* Assign function handles  - based on FMR support */
	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
		device->iser_free_rdma_reg_res = iser_free_fmr_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
	} else
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool;
		device->iser_free_rdma_reg_res = iser_free_fastreg_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg;
	} else {
		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
		return -1;
	}

	device->comps_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);

	device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
				GFP_KERNEL);
	if (!device->comps)
		goto comps_err;

	max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);

	iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
		  device->comps_used, device->ib_device->name,
		  device->ib_device->num_comp_vectors, max_cqe);

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		comp->device = device;
		comp->cq = ib_create_cq(device->ib_device,
					iser_cq_callback,
					iser_cq_event_callback,
					(void *)comp,
					max_cqe, i);
		if (IS_ERR(comp->cq)) {
			comp->cq = NULL;
			goto cq_err;
		}

		if (ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP))
			goto cq_err;

		tasklet_init(&comp->tasklet, iser_cq_tasklet_fn,
			     (unsigned long)comp);
	}

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
				iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	for (i = 0; i < device->comps_used; i++)
		tasklet_kill(&device->comps[i].tasklet);
cq_err:
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		if (comp->cq)
			ib_destroy_cq(comp->cq);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	kfree(device->comps);
comps_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;

	BUG_ON(device->mr == NULL);

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		tasklet_kill(&comp->tasklet);
		ib_destroy_cq(comp->cq);
		comp->cq = NULL;
	}

	(void)ib_unregister_event_handler(&device->event_handler);
	(void)ib_dereg_mr(device->mr);
	(void)ib_dealloc_pd(device->pd);

	kfree(device->comps);
	device->comps = NULL;

	device->mr = NULL;
	device->pd = NULL;
}

/**
 * iser_create_fmr_pool - Creates FMR pool and page_vector
 *
 * returns 0 on success, or errno code on failure
 */
int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct ib_fmr_pool_param params;
	int ret = -ENOMEM;

	ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
					(sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
					GFP_KERNEL);
	if (!ib_conn->fmr.page_vec)
		return ret;

	ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);

	params.page_shift        = SHIFT_4K;
	/* when the first/last SG element are not start/end *
	 * page aligned, the map would be of N+1 pages      */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50%  */
	params.pool_size	 = cmds_max * 2;
	params.dirty_watermark	 = cmds_max;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
	if (!IS_ERR(ib_conn->fmr.pool))
		return 0;

	/* no FMR => no need for page_vec */
	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;

	ret = PTR_ERR(ib_conn->fmr.pool);
	ib_conn->fmr.pool = NULL;
	if (ret != -ENOSYS) {
		iser_err("FMR allocation failed, err %d\n", ret);
		return ret;
	} else {
		iser_warn("FMRs are not supported, using unaligned mode\n");
		return 0;
	}
}

/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, ib_conn->fmr.pool);

	if (ib_conn->fmr.pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr.pool);

	ib_conn->fmr.pool = NULL;

	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;
}

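/*
 * Allocate one fast registration descriptor: a fast_reg page list and MR
 * for the data, plus an optional protection context (prot/sig MRs) when
 * T10-PI is enabled on this connection.
 */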
static int
iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
			 bool pi_enable, struct fast_reg_descriptor *desc)
{
	int ret;

	desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
						      ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_frpl)) {
		ret = PTR_ERR(desc->data_frpl);
		iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
			 ret);
		return PTR_ERR(desc->data_frpl);
	}

	desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_mr)) {
		ret = PTR_ERR(desc->data_mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		goto fast_reg_mr_failure;
	}
	desc->reg_indicators |= ISER_DATA_KEY_VALID;

	if (pi_enable) {
		struct ib_mr_init_attr mr_init_attr = {0};
		struct iser_pi_context *pi_ctx = NULL;

		desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
		if (!desc->pi_ctx) {
			iser_err("Failed to allocate pi context\n");
			ret = -ENOMEM;
			goto pi_ctx_alloc_failure;
		}
		pi_ctx = desc->pi_ctx;

		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
						    ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(pi_ctx->prot_frpl)) {
			ret = PTR_ERR(pi_ctx->prot_frpl);
			iser_err("Failed to allocate prot frpl ret=%d\n",
				 ret);
			goto prot_frpl_failure;
		}

		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
						ISCSI_ISER_SG_TABLESIZE + 1);
		if (IS_ERR(pi_ctx->prot_mr)) {
			ret = PTR_ERR(pi_ctx->prot_mr);
			iser_err("Failed to allocate prot frmr ret=%d\n",
				 ret);
			goto prot_mr_failure;
		}
		desc->reg_indicators |= ISER_PROT_KEY_VALID;

		mr_init_attr.max_reg_descriptors = 2;
		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
		if (IS_ERR(pi_ctx->sig_mr)) {
			ret = PTR_ERR(pi_ctx->sig_mr);
			iser_err("Failed to allocate signature enabled mr err=%d\n",
				 ret);
			goto sig_mr_failure;
		}
		desc->reg_indicators |= ISER_SIG_KEY_VALID;
	}
	desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;

	iser_dbg("Create fr_desc %p page_list %p\n",
		 desc, desc->data_frpl->page_list);

	return 0;
sig_mr_failure:
	ib_dereg_mr(desc->pi_ctx->prot_mr);
prot_mr_failure:
	ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
prot_frpl_failure:
	kfree(desc->pi_ctx);
pi_ctx_alloc_failure:
	ib_dereg_mr(desc->data_mr);
fast_reg_mr_failure:
	ib_free_fast_reg_page_list(desc->data_frpl);

	return ret;
}

/**
 * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 * returns 0 on success, or errno code on failure
 */
int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct fast_reg_descriptor *desc;
	int i, ret;

	INIT_LIST_HEAD(&ib_conn->fastreg.pool);
	ib_conn->fastreg.pool_size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			iser_err("Failed to allocate a new fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = iser_create_fastreg_desc(device->ib_device, device->pd,
					       ib_conn->pi_support, desc);
		if (ret) {
			iser_err("Failed to create fastreg descriptor err=%d\n",
				 ret);
			kfree(desc);
			goto err;
		}

		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
		ib_conn->fastreg.pool_size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}

/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct fast_reg_descriptor *desc, *tmp;
	int i = 0;

	if (list_empty(&ib_conn->fastreg.pool))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
		list_del(&desc->list);
		ib_free_fast_reg_page_list(desc->data_frpl);
		ib_dereg_mr(desc->data_mr);
		if (desc->pi_ctx) {
			ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
			ib_dereg_mr(desc->pi_ctx->prot_mr);
			ib_destroy_mr(desc->pi_ctx->sig_mr);
			kfree(desc->pi_ctx);
		}
		kfree(desc);
		++i;
	}

	if (i < ib_conn->fastreg.pool_size)
		iser_warn("pool still has %d regions registered\n",
			  ib_conn->fastreg.pool_size - i);
}

/**
 * iser_create_ib_conn_res - creates the Queue-Pair (QP)
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);
	struct iser_device	*device;
	struct ib_device_attr *dev_attr;
	struct ib_qp_init_attr	init_attr;
	int			ret = -ENOMEM;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;
	dev_attr = &device->dev_attr;

	memset(&init_attr, 0, sizeof init_attr);

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->comps_used; index++) {
		if (device->comps[index].active_qps <
		    device->comps[min_index].active_qps)
			min_index = index;
	}
	ib_conn->comp = &device->comps[min_index];
	ib_conn->comp->active_qps++;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= ib_conn->comp->cq;
	init_attr.recv_cq	= ib_conn->comp->cq;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;
	if (ib_conn->pi_support) {
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
		iser_conn->max_cmds =
			ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
	} else {
		if (dev_attr->max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
			init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS + 1;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
		} else {
			init_attr.cap.max_send_wr = dev_attr->max_qp_wr;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(dev_attr->max_qp_wr);
			iser_dbg("device %s supports max_send_wr %d\n",
				 device->ib_device->name, dev_attr->max_qp_wr);
		}
	}

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;

out_err:
	mutex_lock(&ig.connlist_mutex);
	ib_conn->comp->active_qps--;
	mutex_unlock(&ig.connlist_mutex);
	iser_err("unable to alloc mem or create resource, err %d\n", ret);

	return ret;
}

/**
 * based on the resolved device node GUID see if there is already an
 * allocated device for this device. If there is no such device, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign this device to the device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

/**
 * Called with state mutex held
 **/
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
{
	int ret;

	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;

	return ret;
}

void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;

	iser_conn = container_of(work, struct iser_conn, release_work);

	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resources cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);

	iser_conn_release(iser_conn);
}

/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy: indicator if we need to try to release the
 *     iser device and memory regions pool (only iscsi
 *     shutdown and DEVICE_REMOVAL will use this).
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
				  bool destroy)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	if (ib_conn->qp != NULL) {
		ib_conn->comp->active_qps--;
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}

	if (destroy) {
		if (iser_conn->rx_descs)
			iser_free_rx_descriptors(iser_conn);

		if (device != NULL) {
			iser_device_try_release(device);
			ib_conn->device = NULL;
		}
	}
}

/**
 * Frees all conn objects and deallocs conn descriptor
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	mutex_lock(&iser_conn->state_mutex);
	/* In case we end up here without ep_disconnect being invoked. */
	if (iser_conn->state != ISER_CONN_DOWN) {
		iser_warn("iser conn %p state %d, expected state down.\n",
			  iser_conn, iser_conn->state);
		iscsi_destroy_endpoint(iser_conn->ep);
		iser_conn->state = ISER_CONN_DOWN;
	}
	/*
	 * In case we never got to bind stage, we still need to
	 * release IB resources (which is safe to call more than once).
	 */
	iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}

	kfree(iser_conn);
}

/**
 * triggers start of the disconnect procedures and wait for them to be done
 * Called with state mutex held
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_send_wr *bad_wr;
	int err = 0;

	/* terminate the iser conn only if the conn state is UP */
	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				       ISER_CONN_TERMINATING))
		return 0;

	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);

	/*
	 * In case we didn't already clean up the cma_id (peer initiated
	 * a disconnection), we need to cause the CMA to change the QP
	 * state to ERROR.
	 */
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);

		/* post an indication that all flush errors were consumed */
		err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr);
		if (err) {
			iser_err("conn %p failed to post beacon", ib_conn);
			return 1;
		}

		wait_for_completion(&ib_conn->flush_comp);
	}

	return 1;
}

/**
 * Called with state mutex held
 **/
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn->state = ISER_CONN_TERMINATING;
}

/**
 * Called with state mutex held
 **/
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn   *iser_conn;
	struct ib_conn   *ib_conn;
	int    ret;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->dev_attr.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}

/**
 * Called with state mutex held
 **/
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int    ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
			ISER_SEND_W_INV_NOT_SUPPORTED);
	conn_param.private_data		= (void *)&req_hdr;
	conn_param.private_data_len	= sizeof(struct iser_cm_hdr);

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}

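/**
 * Connection established - move the connection to ISER_CONN_UP and
 * wake up a blocking iser_connect(). Called with state mutex held.
 **/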
static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);

	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
}

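/**
 * Peer (or local) disconnect - terminate the connection and, if it is
 * bound, notify the iscsi layer of the connection failure.
 **/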
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
}

static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	/*
	 * We are not guaranteed that we visited disconnected_handler
	 * by now, call it here to be safe that we handle CM drep
	 * and flush errors.
	 */
	iser_disconnected_handler(cma_id);
	iser_free_ib_conn_res(iser_conn, destroy);
	complete(&iser_conn->ib_completion);
}

static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_info("event %d status %d conn %p id %p\n",
		  event->event, event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * we *must* destroy the device as we cannot rely
		 * on iscsid to be around to initiate error handling.
		 * also if we are not in state DOWN implicitly destroy
		 * the cma_id.
		 */
		iser_cleanup_handler(cma_id, true);
		if (iser_conn->state != ISER_CONN_DOWN) {
			iser_conn->ib_conn.cma_id = NULL;
			ret = 1;
		}
		break;
	default:
		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);

	return ret;
}

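/**
 * iser_conn_init - set the initial state and initialize the completions,
 * list head and locks of a freshly allocated iser connection.
 */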
void iser_conn_init(struct iser_conn *iser_conn)
{
	iser_conn->state = ISER_CONN_INIT;
	iser_conn->ib_conn.post_recv_buf_count = 0;
	init_completion(&iser_conn->ib_conn.flush_comp);
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->ib_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	spin_lock_init(&iser_conn->ib_conn.lock);
	mutex_init(&iser_conn->state_mutex);
}

 /**
 * starts the process of connecting to the target
 * sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn   *iser_conn,
		 struct sockaddr    *src_addr,
		 struct sockaddr    *dst_addr,
		 int                 non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	mutex_lock(&iser_conn->state_mutex);

	sprintf(iser_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", iser_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_conn->state = ISER_CONN_PENDING;

	ib_conn->beacon.wr_id = ISER_BEACON_WRID;
	ib_conn->beacon.opcode = IB_WR_SEND;

	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);

		if (iser_conn->state != ISER_CONN_UP) {
			err =  -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);

	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}

/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct ib_conn *ib_conn,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg  *mem_reg)
{
	struct ib_pool_fmr *mem;
	u64		   io_addr;
	u64		   *page_list;
	int		   status;

	page_list = page_vec->pages;
	io_addr	  = page_list[0];

	mem  = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
				    page_list,
				    page_vec->length,
				    io_addr);

	if (IS_ERR(mem)) {
		status = (int)PTR_ERR(mem);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
		return status;
	}

	mem_reg->lkey  = mem->fmr->lkey;
	mem_reg->rkey  = mem->fmr->rkey;
	mem_reg->len   = page_vec->length * SIZE_4K;
	mem_reg->va    = io_addr;
	mem_reg->mem_h = (void *)mem;

	mem_reg->va   += page_vec->offset;
	mem_reg->len   = page_vec->data_size;

	iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
		 "entry[0]: (0x%08lx,%ld)] -> "
		 "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
		 page_vec, page_vec->length,
		 (unsigned long)page_vec->pages[0],
		 (unsigned long)page_vec->data_size,
		 (unsigned int)mem_reg->lkey, mem_reg->mem_h,
		 (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
	return 0;
}

/**
 * Unregister (previously registered using FMR) memory.
 * If memory is non-FMR does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	int ret;

	if (!reg->mem_h)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	struct iser_conn *iser_conn = iser_task->iser_conn;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct fast_reg_descriptor *desc = reg->mem_h;

	if (!desc)
		return;

	reg->mem_h = NULL;
	spin_lock_bh(&ib_conn->lock);
	list_add_tail(&desc->list, &ib_conn->fastreg.pool);
	spin_unlock_bh(&ib_conn->lock);
}

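/**
 * iser_post_recvl - post a single receive work request for the login
 * response buffer.
 */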
int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_sge	  sge;
	int ib_ret;

	sge.addr   = iser_conn->login_resp_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey   = ib_conn->device->mr->lkey;

	rx_wr.wr_id   = (uintptr_t)iser_conn->login_resp_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret	= ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}

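/**
 * iser_post_recvm - post @count receive work requests taken from the
 * connection rx descriptors ring.
 */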
int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	unsigned int my_rx_head = iser_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &iser_conn->rx_descs[my_rx_head];
		rx_wr->wr_id	= (uintptr_t)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret	= ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		iser_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}


/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, -1 on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		   bool signal)
{
	int		  ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (uintptr_t)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = signal ? IB_SEND_SIGNALED : 0;

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret)
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);

	return ib_ret;
}

/**
 * is_iser_tx_desc - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @iser_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_iser_tx_desc(struct iser_conn *iser_conn, void *wr_id)
{
	void *start = iser_conn->rx_descs;
	int len = iser_conn->num_rx_descs * sizeof(*iser_conn->rx_descs);

	if (wr_id >= start && wr_id < start + len)
		return false;

	return true;
}

/**
 * iser_handle_comp_error() - Handle error completion
 * @ib_conn:   connection RDMA resources
 * @wc:        work completion
 *
 * Notes: We may handle a FLUSH error completion and in this case
 *        we only cleanup in case TX type was DATAOUT. For non-FLUSH
 *        error completion we should also notify iscsi layer that
 *        connection is failed (in case we passed bind stage).
 */
static void
iser_handle_comp_error(struct ib_conn *ib_conn,
		       struct ib_wc *wc)
{
	void *wr_id = (void *)(uintptr_t)wc->wr_id;
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);

	if (wc->status != IB_WC_WR_FLUSH_ERR)
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

	if (is_iser_tx_desc(iser_conn, wr_id)) {
		struct iser_tx_desc *desc = wr_id;

		if (desc->type == ISCSI_TX_DATAOUT)
			kmem_cache_free(ig.desc_cache, desc);
	} else {
		ib_conn->post_recv_buf_count--;
	}
}

/**
 * iser_handle_wc - handle a single work completion
 * @wc: work completion
 *
 * Soft-IRQ context, work completion can be either
 * SEND or RECV, and can turn out successful or
 * with error (or flush error).
 */
static void iser_handle_wc(struct ib_wc *wc)
{
	struct ib_conn *ib_conn;
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;

	ib_conn = wc->qp->qp_context;
	if (likely(wc->status == IB_WC_SUCCESS)) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
			iser_rcv_completion(rx_desc, wc->byte_len,
					    ib_conn);
		} else
		if (wc->opcode == IB_WC_SEND) {
			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
			iser_snd_completion(tx_desc, ib_conn);
		} else {
			iser_err("Unknown wc opcode %d\n", wc->opcode);
		}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iser_err("wr id %llx status %d vend_err %x\n",
				 wc->wr_id, wc->status, wc->vendor_err);
		else
			iser_dbg("flush error: wr id %llx\n", wc->wr_id);

		if (wc->wr_id != ISER_FASTREG_LI_WRID &&
		    wc->wr_id != ISER_BEACON_WRID)
			iser_handle_comp_error(ib_conn, wc);

		/* complete in case all flush errors were consumed */
		if (wc->wr_id == ISER_BEACON_WRID)
			complete(&ib_conn->flush_comp);
	}
}

/**
 * iser_cq_tasklet_fn - iSER completion polling loop
 * @data: iSER completion context
 *
 * Soft-IRQ context, polling connection CQ until
 * either CQ was empty or we exhausted polling budget
 */
static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_comp *comp = (struct iser_comp *)data;
	struct ib_cq *cq = comp->cq;
	struct ib_wc *const wcs = comp->wcs;
	int i, n, completed = 0;

	while ((n = ib_poll_cq(cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
		for (i = 0; i < n; i++)
			iser_handle_wc(&wcs[i]);

		completed += n;
		if (completed >= iser_cq_poll_limit)
			break;
	}

	/*
	 * It is assumed here that arming CQ only once its empty
	 * would not cause interrupts to be missed.
	 */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	iser_dbg("got %d completions\n", completed);
}

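/* CQ interrupt callback - defer completion processing to the per-CQ tasklet */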
static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_comp *comp = cq_context;

	tasklet_schedule(&comp->tasklet);
}

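/**
 * iser_check_task_pi_status - if the task was protected, query the
 * signature MR status; on a T10-PI error report the failing sector and
 * return the matching check code (guard/app-tag/ref-tag).
 */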
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	struct fast_reg_descriptor *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) {
		desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;

			do_div(sector_off, sector_size + 8);
			*sector = scsi_get_lba(iser_task->sc) + sector_off;

			pr_err("PI error found type %d at sector %llx "
			       "expected %x vs actual %x\n",
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}