iser_verbs.c 34.8 KB
Newer Older
1 2 3
/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
4
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
36
#include <linux/slab.h>
37 38 39 40 41
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
42 43
#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
44 45
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
				 ISCSI_ISER_MAX_CONN)
46

47 48
static int iser_cq_poll_limit = 512;

49 50 51 52 53 54 55 56 57 58 59 60 61
static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got cq event %d \n", cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got qp event %d\n",cause->event);
}

62 63 64 65 66 67 68
static void iser_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	iser_err("async event %d on device %s port %d\n", event->event,
		event->device->name, event->element.port_num);
}

69 70 71 72 73 74 75 76 77
/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapator.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
78
	struct ib_device_attr *dev_attr = &device->dev_attr;
79
	int ret, i;
80

81 82
	ret = ib_query_device(device->ib_device, dev_attr);
	if (ret) {
83
		pr_warn("Query device failed for %s\n", device->ib_device->name);
84
		return ret;
85 86 87 88 89 90 91 92 93 94 95 96
	}

	/* Assign function handles  - based on FMR support */
	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
		device->iser_free_rdma_reg_res = iser_free_fmr_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
	} else
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
97 98 99 100 101
		iser_info("FastReg supported, using FastReg for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool;
		device->iser_free_rdma_reg_res = iser_free_fastreg_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg;
102
	} else {
103
		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
104
		return -1;
105
	}
106

107 108
	device->comps_used = min(ISER_MAX_CQ,
				 device->ib_device->num_comp_vectors);
109
	iser_info("using %d CQs, device %s supports %d vectors\n",
110
		  device->comps_used, device->ib_device->name,
111
		  device->ib_device->num_comp_vectors);
112

113 114 115 116
	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

117 118 119 120
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		comp->device = device;
121 122 123 124 125 126 127
		comp->cq = ib_create_cq(device->ib_device,
					iser_cq_callback,
					iser_cq_event_callback,
					(void *)comp,
					ISER_MAX_CQ_LEN, i);
		if (IS_ERR(comp->cq)) {
			comp->cq = NULL;
128
			goto cq_err;
129
		}
130

131
		if (ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP))
132
			goto cq_err;
133

134 135
		tasklet_init(&comp->tasklet, iser_cq_tasklet_fn,
			     (unsigned long)comp);
136
	}
137

138 139 140
	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
141 142 143
	if (IS_ERR(device->mr))
		goto dma_mr_err;

144 145 146 147 148
	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
				iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

149 150
	return 0;

151 152
handler_err:
	ib_dereg_mr(device->mr);
153
dma_mr_err:
154 155
	for (i = 0; i < device->comps_used; i++)
		tasklet_kill(&device->comps[i].tasklet);
156
cq_err:
157 158 159
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

160 161
		if (comp->cq)
			ib_destroy_cq(comp->cq);
162
	}
163 164 165 166 167 168 169
	ib_dealloc_pd(device->pd);
pd_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/**
170
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
171 172 173 174
 * CQ and PD created with the device associated with the adapator.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
175
	int i;
176 177
	BUG_ON(device->mr == NULL);

178 179 180 181
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		tasklet_kill(&comp->tasklet);
182 183
		ib_destroy_cq(comp->cq);
		comp->cq = NULL;
184 185
	}

186
	(void)ib_unregister_event_handler(&device->event_handler);
187 188 189 190 191 192 193 194
	(void)ib_dereg_mr(device->mr);
	(void)ib_dealloc_pd(device->pd);

	device->mr = NULL;
	device->pd = NULL;
}

/**
195
 * iser_create_fmr_pool - Creates FMR pool and page_vector
196
 *
197
 * returns 0 on success, or errno code on failure
198
 */
S
Sagi Grimberg 已提交
199
int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
200
{
S
Sagi Grimberg 已提交
201
	struct iser_device *device = ib_conn->device;
202
	struct ib_fmr_pool_param params;
203
	int ret = -ENOMEM;
204

S
Sagi Grimberg 已提交
205
	ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
206 207
					(sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
					GFP_KERNEL);
S
Sagi Grimberg 已提交
208
	if (!ib_conn->fmr.page_vec)
209
		return ret;
210

S
Sagi Grimberg 已提交
211
	ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);
212

213
	params.page_shift        = SHIFT_4K;
214 215 216 217 218
	/* when the first/last SG element are not start/end *
	 * page aligned, the map whould be of N+1 pages     */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50%  */
219 220
	params.pool_size	 = cmds_max * 2;
	params.dirty_watermark	 = cmds_max;
221 222 223 224 225 226
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

S
Sagi Grimberg 已提交
227 228
	ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
	if (!IS_ERR(ib_conn->fmr.pool))
229 230 231
		return 0;

	/* no FMR => no need for page_vec */
S
Sagi Grimberg 已提交
232 233
	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;
234

S
Sagi Grimberg 已提交
235 236
	ret = PTR_ERR(ib_conn->fmr.pool);
	ib_conn->fmr.pool = NULL;
237 238 239 240
	if (ret != -ENOSYS) {
		iser_err("FMR allocation failed, err %d\n", ret);
		return ret;
	} else {
241
		iser_warn("FMRs are not supported, using unaligned mode\n");
242
		return 0;
243
	}
244 245 246 247 248
}

/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
S
Sagi Grimberg 已提交
249
void iser_free_fmr_pool(struct ib_conn *ib_conn)
250 251
{
	iser_info("freeing conn %p fmr pool %p\n",
S
Sagi Grimberg 已提交
252
		  ib_conn, ib_conn->fmr.pool);
253

S
Sagi Grimberg 已提交
254 255
	if (ib_conn->fmr.pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr.pool);
256

S
Sagi Grimberg 已提交
257
	ib_conn->fmr.pool = NULL;
258

S
Sagi Grimberg 已提交
259 260
	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;
261 262
}

263 264
static int
iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
265
			 bool pi_enable, struct fast_reg_descriptor *desc)
266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283
{
	int ret;

	desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
						      ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_frpl)) {
		ret = PTR_ERR(desc->data_frpl);
		iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
			 ret);
		return PTR_ERR(desc->data_frpl);
	}

	desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_mr)) {
		ret = PTR_ERR(desc->data_mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		goto fast_reg_mr_failure;
	}
284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329
	desc->reg_indicators |= ISER_DATA_KEY_VALID;

	if (pi_enable) {
		struct ib_mr_init_attr mr_init_attr = {0};
		struct iser_pi_context *pi_ctx = NULL;

		desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
		if (!desc->pi_ctx) {
			iser_err("Failed to allocate pi context\n");
			ret = -ENOMEM;
			goto pi_ctx_alloc_failure;
		}
		pi_ctx = desc->pi_ctx;

		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
						    ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(pi_ctx->prot_frpl)) {
			ret = PTR_ERR(pi_ctx->prot_frpl);
			iser_err("Failed to allocate prot frpl ret=%d\n",
				 ret);
			goto prot_frpl_failure;
		}

		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
						ISCSI_ISER_SG_TABLESIZE + 1);
		if (IS_ERR(pi_ctx->prot_mr)) {
			ret = PTR_ERR(pi_ctx->prot_mr);
			iser_err("Failed to allocate prot frmr ret=%d\n",
				 ret);
			goto prot_mr_failure;
		}
		desc->reg_indicators |= ISER_PROT_KEY_VALID;

		mr_init_attr.max_reg_descriptors = 2;
		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
		if (IS_ERR(pi_ctx->sig_mr)) {
			ret = PTR_ERR(pi_ctx->sig_mr);
			iser_err("Failed to allocate signature enabled mr err=%d\n",
				 ret);
			goto sig_mr_failure;
		}
		desc->reg_indicators |= ISER_SIG_KEY_VALID;
	}
	desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;

330 331
	iser_dbg("Create fr_desc %p page_list %p\n",
		 desc, desc->data_frpl->page_list);
332 333

	return 0;
334 335 336 337 338 339 340 341
sig_mr_failure:
	ib_dereg_mr(desc->pi_ctx->prot_mr);
prot_mr_failure:
	ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
prot_frpl_failure:
	kfree(desc->pi_ctx);
pi_ctx_alloc_failure:
	ib_dereg_mr(desc->data_mr);
342 343 344 345 346 347
fast_reg_mr_failure:
	ib_free_fast_reg_page_list(desc->data_frpl);

	return ret;
}

348
/**
349
 * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
350 351 352
 * for fast registration work requests.
 * returns 0 on success, or errno code on failure
 */
S
Sagi Grimberg 已提交
353
int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
354
{
S
Sagi Grimberg 已提交
355 356
	struct iser_device *device = ib_conn->device;
	struct fast_reg_descriptor *desc;
357 358
	int i, ret;

S
Sagi Grimberg 已提交
359 360
	INIT_LIST_HEAD(&ib_conn->fastreg.pool);
	ib_conn->fastreg.pool_size = 0;
361
	for (i = 0; i < cmds_max; i++) {
362
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
363 364 365 366 367 368
		if (!desc) {
			iser_err("Failed to allocate a new fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

369
		ret = iser_create_fastreg_desc(device->ib_device, device->pd,
S
Sagi Grimberg 已提交
370
					       ib_conn->pi_support, desc);
371 372 373 374 375
		if (ret) {
			iser_err("Failed to create fastreg descriptor err=%d\n",
				 ret);
			kfree(desc);
			goto err;
376 377
		}

S
Sagi Grimberg 已提交
378 379
		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
		ib_conn->fastreg.pool_size++;
380 381 382
	}

	return 0;
383

384
err:
S
Sagi Grimberg 已提交
385
	iser_free_fastreg_pool(ib_conn);
386 387 388 389
	return ret;
}

/**
390
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
391
 */
S
Sagi Grimberg 已提交
392
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
393 394 395 396
{
	struct fast_reg_descriptor *desc, *tmp;
	int i = 0;

S
Sagi Grimberg 已提交
397
	if (list_empty(&ib_conn->fastreg.pool))
398 399
		return;

S
Sagi Grimberg 已提交
400
	iser_info("freeing conn %p fr pool\n", ib_conn);
401

S
Sagi Grimberg 已提交
402
	list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
403 404 405
		list_del(&desc->list);
		ib_free_fast_reg_page_list(desc->data_frpl);
		ib_dereg_mr(desc->data_mr);
406 407 408 409 410 411
		if (desc->pi_ctx) {
			ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
			ib_dereg_mr(desc->pi_ctx->prot_mr);
			ib_destroy_mr(desc->pi_ctx->sig_mr);
			kfree(desc->pi_ctx);
		}
412 413 414 415
		kfree(desc);
		++i;
	}

S
Sagi Grimberg 已提交
416
	if (i < ib_conn->fastreg.pool_size)
417
		iser_warn("pool still has %d regions registered\n",
S
Sagi Grimberg 已提交
418
			  ib_conn->fastreg.pool_size - i);
419 420
}

421 422 423 424 425
/**
 * iser_create_ib_conn_res - Queue-Pair (QP)
 *
 * returns 0 on success, -1 on failure
 */
S
Sagi Grimberg 已提交
426
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
427 428 429 430 431 432
{
	struct iser_device	*device;
	struct ib_qp_init_attr	init_attr;
	int			ret = -ENOMEM;
	int index, min_index = 0;

S
Sagi Grimberg 已提交
433
	BUG_ON(ib_conn->device == NULL);
434

S
Sagi Grimberg 已提交
435
	device = ib_conn->device;
436 437 438

	memset(&init_attr, 0, sizeof init_attr);

439 440
	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
441 442 443
	for (index = 0; index < device->comps_used; index++) {
		if (device->comps[index].active_qps <
		    device->comps[min_index].active_qps)
444
			min_index = index;
445 446 447
	}
	ib_conn->comp = &device->comps[min_index];
	ib_conn->comp->active_qps++;
448
	mutex_unlock(&ig.connlist_mutex);
S
Sagi Grimberg 已提交
449
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
450

451
	init_attr.event_handler = iser_qp_event_callback;
S
Sagi Grimberg 已提交
452
	init_attr.qp_context	= (void *)ib_conn;
453 454
	init_attr.send_cq	= ib_conn->comp->cq;
	init_attr.recv_cq	= ib_conn->comp->cq;
455
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
456
	init_attr.cap.max_send_sge = 2;
457
	init_attr.cap.max_recv_sge = 1;
458 459
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;
S
Sagi Grimberg 已提交
460
	if (ib_conn->pi_support) {
461
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
462 463
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
	} else {
464
		init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS + 1;
465
	}
466

S
Sagi Grimberg 已提交
467
	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
468
	if (ret)
469
		goto out_err;
470

S
Sagi Grimberg 已提交
471
	ib_conn->qp = ib_conn->cma_id->qp;
472
	iser_info("setting conn %p cma_id %p qp %p\n",
S
Sagi Grimberg 已提交
473 474
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
475 476
	return ret;

477
out_err:
478 479 480 481 482 483 484 485 486 487 488
	iser_err("unable to alloc mem or create resource, err %d\n", ret);
	return ret;
}

/**
 * based on the resolved device node GUID see if there already allocated
 * device for this device. If there's no such, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
A
Arne Redlich 已提交
489
	struct iser_device *device;
490 491 492

	mutex_lock(&ig.device_list_mutex);

A
Arne Redlich 已提交
493
	list_for_each_entry(device, &ig.device_list, ig_list)
494 495
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
496
			goto inc_refcnt;
A
Arne Redlich 已提交
497 498 499 500 501 502 503 504 505 506 507 508

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign this device to the device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
509
	}
A
Arne Redlich 已提交
510 511
	list_add(&device->ig_list, &ig.device_list);

512
inc_refcnt:
513
	device->refcount++;
514
out:
515 516 517 518 519 520 521 522 523
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
524
	iser_info("device %p refcount %d\n", device, device->refcount);
525 526 527 528 529 530 531 532
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

533 534 535
/**
 * Called with state mutex held
 **/
536 537 538
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
539 540 541
{
	int ret;

542 543 544 545
	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;

546 547 548
	return ret;
}

549 550
void iser_release_work(struct work_struct *work)
{
551
	struct iser_conn *iser_conn;
552

553
	iser_conn = container_of(work, struct iser_conn, release_work);
554

555 556 557 558
	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resouces cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);
559

560 561 562
	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);
563

564
	iser_conn_release(iser_conn);
565 566
}

567 568 569
/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
570 571 572
 * @destroy_device: indicator if we need to try to release
 *     the iser device (only iscsi shutdown and DEVICE_REMOVAL
 *     will use this.
573 574 575 576 577
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is Safe to
 * be invoked multiple times.
 */
578 579
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
				  bool destroy_device)
580 581 582 583 584 585 586 587 588 589
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	iser_free_rx_descriptors(iser_conn);

	if (ib_conn->qp != NULL) {
590
		ib_conn->comp->active_qps--;
591 592 593 594
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}

595
	if (destroy_device && device != NULL) {
596 597 598 599 600
		iser_device_try_release(device);
		ib_conn->device = NULL;
	}
}

601 602 603
/**
 * Frees all conn objects and deallocs conn descriptor
 */
604
void iser_conn_release(struct iser_conn *iser_conn)
605
{
S
Sagi Grimberg 已提交
606
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
607 608

	mutex_lock(&ig.connlist_mutex);
609
	list_del(&iser_conn->conn_list);
610
	mutex_unlock(&ig.connlist_mutex);
611

612
	mutex_lock(&iser_conn->state_mutex);
613 614 615
	if (iser_conn->state != ISER_CONN_DOWN)
		iser_warn("iser conn %p state %d, expected state down.\n",
			  iser_conn, iser_conn->state);
616 617 618 619 620
	/*
	 * In case we never got to bind stage, we still need to
	 * release IB resources (which is safe to call more than once).
	 */
	iser_free_ib_conn_res(iser_conn, true);
621
	mutex_unlock(&iser_conn->state_mutex);
622

S
Sagi Grimberg 已提交
623 624 625
	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
R
Roi Dayan 已提交
626
	}
S
Sagi Grimberg 已提交
627

628
	kfree(iser_conn);
629 630
}

631 632
/**
 * triggers start of the disconnect procedures and wait for them to be done
633
 * Called with state mutex held
634
 */
635
int iser_conn_terminate(struct iser_conn *iser_conn)
636
{
S
Sagi Grimberg 已提交
637
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
638
	struct ib_send_wr *bad_wr;
639 640
	int err = 0;

641 642 643 644 645 646 647 648 649 650 651 652 653 654 655
	/* terminate the iser conn only if the conn state is UP */
	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				       ISER_CONN_TERMINATING))
		return 0;

	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);

	/*
	 * In case we didn't already clean up the cma_id (peer initiated
	 * a disconnection), we need to Cause the CMA to change the QP
	 * state to ERROR.
656
	 */
657 658 659 660 661 662
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);

663 664 665 666 667
		/* post an indication that all flush errors were consumed */
		err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr);
		if (err)
			iser_err("conn %p failed to post beacon", ib_conn);

668
		wait_for_completion(&ib_conn->flush_comp);
669
	}
670

671
	return 1;
672 673
}

674 675 676
/**
 * Called with state mutex held
 **/
677
static void iser_connect_error(struct rdma_cm_id *cma_id)
678
{
679
	struct iser_conn *iser_conn;
680

681 682
	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn->state = ISER_CONN_DOWN;
683 684
}

685 686 687
/**
 * Called with state mutex held
 **/
688
static void iser_addr_handler(struct rdma_cm_id *cma_id)
689 690
{
	struct iser_device *device;
691
	struct iser_conn   *iser_conn;
S
Sagi Grimberg 已提交
692
	struct ib_conn   *ib_conn;
693 694
	int    ret;

695 696
	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
697 698 699
		/* bailout */
		return;

S
Sagi Grimberg 已提交
700
	ib_conn = &iser_conn->ib_conn;
701
	device = iser_device_find_by_ib_device(cma_id);
702 703
	if (!device) {
		iser_err("device lookup/creation failed\n");
704 705
		iser_connect_error(cma_id);
		return;
706 707
	}

S
Sagi Grimberg 已提交
708
	ib_conn->device = device;
709

710 711 712 713 714 715
	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->dev_attr.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
S
Sagi Grimberg 已提交
716 717
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
718
		} else {
S
Sagi Grimberg 已提交
719
			ib_conn->pi_support = true;
720 721 722
		}
	}

723 724 725
	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
726 727
		iser_connect_error(cma_id);
		return;
728 729 730
	}
}

731 732 733
/**
 * Called with state mutex held
 **/
734
static void iser_route_handler(struct rdma_cm_id *cma_id)
735 736 737
{
	struct rdma_conn_param conn_param;
	int    ret;
738
	struct iser_cm_hdr req_hdr;
739
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
S
Sagi Grimberg 已提交
740 741
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
742

743
	if (iser_conn->state != ISER_CONN_PENDING)
744 745 746
		/* bailout */
		return;

S
Sagi Grimberg 已提交
747
	ret = iser_create_ib_conn_res(ib_conn);
748 749 750 751
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
752
	conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
753 754 755 756
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

757 758 759 760 761 762
	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
			ISER_SEND_W_INV_NOT_SUPPORTED);
	conn_param.private_data		= (void *)&req_hdr;
	conn_param.private_data_len	= sizeof(struct iser_cm_hdr);

763 764 765 766 767 768
	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

769
	return;
770
failure:
771
	iser_connect_error(cma_id);
772 773 774 775
}

static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
776
	struct iser_conn *iser_conn;
777 778 779
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

780 781
	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
782 783 784
		/* bailout */
		return;

785 786
	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);
787

788 789
	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
790 791
}

792
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
793
{
794
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
795

796
	if (iser_conn_terminate(iser_conn)) {
797
		if (iser_conn->iscsi_conn)
798 799
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
800 801 802
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
803 804 805 806 807 808
}

static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy_device)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
809

810 811 812 813
	/*
	 * We are not guaranteed that we visited disconnected_handler
	 * by now, call it here to be safe that we handle CM drep
	 * and flush errors.
814
	 */
815 816 817 818
	iser_disconnected_handler(cma_id);
	iser_free_ib_conn_res(iser_conn, destroy_device);
	complete(&iser_conn->ib_completion);
};
819 820 821

static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
822
	struct iser_conn *iser_conn;
823
	int ret = 0;
824

825
	iser_conn = (struct iser_conn *)cma_id->context;
826 827
	iser_info("event %d status %d conn %p id %p\n",
		  event->event, event->status, cma_id->context, cma_id);
828

829
	mutex_lock(&iser_conn->state_mutex);
830 831
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
832
		iser_addr_handler(cma_id);
833 834
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
835
		iser_route_handler(cma_id);
836 837 838 839 840 841 842 843 844
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
845
		iser_connect_error(cma_id);
846 847
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
848
	case RDMA_CM_EVENT_ADDR_CHANGE:
849
		iser_disconnected_handler(cma_id);
850
		break;
851 852 853 854 855 856 857 858 859 860 861 862 863
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * we *must* destroy the device as we cannot rely
		 * on iscsid to be around to initiate error handling.
		 * also implicitly destroy the cma_id.
		 */
		iser_cleanup_handler(cma_id, true);
		iser_conn->ib_conn.cma_id = NULL;
		ret = 1;
		break;
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
864
	default:
865
		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
866 867
		break;
	}
868
	mutex_unlock(&iser_conn->state_mutex);
869 870

	return ret;
871 872
}

873
void iser_conn_init(struct iser_conn *iser_conn)
874
{
875
	iser_conn->state = ISER_CONN_INIT;
S
Sagi Grimberg 已提交
876
	iser_conn->ib_conn.post_recv_buf_count = 0;
877
	init_completion(&iser_conn->ib_conn.flush_comp);
878
	init_completion(&iser_conn->stop_completion);
879
	init_completion(&iser_conn->ib_completion);
880 881
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
S
Sagi Grimberg 已提交
882
	spin_lock_init(&iser_conn->ib_conn.lock);
883
	mutex_init(&iser_conn->state_mutex);
884 885 886 887
}

 /**
 * starts the process of connecting to the target
888
 * sleeps until the connection is established or rejected
889
 */
890
int iser_connect(struct iser_conn   *iser_conn,
R
Roi Dayan 已提交
891 892
		 struct sockaddr    *src_addr,
		 struct sockaddr    *dst_addr,
893 894
		 int                 non_blocking)
{
S
Sagi Grimberg 已提交
895
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
896 897
	int err = 0;

898
	mutex_lock(&iser_conn->state_mutex);
899

900
	sprintf(iser_conn->name, "%pISp", dst_addr);
R
Roi Dayan 已提交
901

902
	iser_info("connecting to: %s\n", iser_conn->name);
903 904

	/* the device is known only --after-- address resolution */
S
Sagi Grimberg 已提交
905
	ib_conn->device = NULL;
906

907
	iser_conn->state = ISER_CONN_PENDING;
908

909 910 911
	ib_conn->beacon.wr_id = ISER_BEACON_WRID;
	ib_conn->beacon.opcode = IB_WR_SEND;

S
Sagi Grimberg 已提交
912 913 914 915 916
	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
917 918 919 920
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

S
Sagi Grimberg 已提交
921
	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
922 923 924 925 926 927
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
928
		wait_for_completion_interruptible(&iser_conn->up_completion);
929

930
		if (iser_conn->state != ISER_CONN_UP) {
931 932 933 934
			err =  -EIO;
			goto connect_failure;
		}
	}
935
	mutex_unlock(&iser_conn->state_mutex);
936 937

	mutex_lock(&ig.connlist_mutex);
938
	list_add(&iser_conn->conn_list, &ig.connlist);
939 940 941 942
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
S
Sagi Grimberg 已提交
943
	ib_conn->cma_id = NULL;
944
addr_failure:
945
	iser_conn->state = ISER_CONN_DOWN;
946
connect_failure:
947 948
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
949 950 951 952 953 954 955 956
	return err;
}

/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
S
Sagi Grimberg 已提交
957
int iser_reg_page_vec(struct ib_conn *ib_conn,
958 959 960 961 962 963 964 965 966 967 968
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg  *mem_reg)
{
	struct ib_pool_fmr *mem;
	u64		   io_addr;
	u64		   *page_list;
	int		   status;

	page_list = page_vec->pages;
	io_addr	  = page_list[0];

S
Sagi Grimberg 已提交
969
	mem  = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
970 971
				    page_list,
				    page_vec->length,
972
				    io_addr);
973 974 975 976 977 978 979 980 981

	if (IS_ERR(mem)) {
		status = (int)PTR_ERR(mem);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
		return status;
	}

	mem_reg->lkey  = mem->fmr->lkey;
	mem_reg->rkey  = mem->fmr->rkey;
982
	mem_reg->len   = page_vec->length * SIZE_4K;
983
	mem_reg->va    = io_addr;
984
	mem_reg->is_mr = 1;
985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001
	mem_reg->mem_h = (void *)mem;

	mem_reg->va   += page_vec->offset;
	mem_reg->len   = page_vec->data_size;

	iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
		 "entry[0]: (0x%08lx,%ld)] -> "
		 "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
		 page_vec, page_vec->length,
		 (unsigned long)page_vec->pages[0],
		 (unsigned long)page_vec->data_size,
		 (unsigned int)mem_reg->lkey, mem_reg->mem_h,
		 (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
	return 0;
}

/**
1002 1003
 * Unregister (previosuly registered using FMR) memory.
 * If memory is non-FMR does nothing.
1004
 */
1005 1006
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
1007
{
1008
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
1009 1010
	int ret;

1011
	if (!reg->is_mr)
1012 1013
		return;

1014 1015 1016 1017 1018 1019 1020 1021 1022
	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n",reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

1023 1024
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
1025 1026
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
1027
	struct iser_conn *iser_conn = iser_task->iser_conn;
S
Sagi Grimberg 已提交
1028
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
1029 1030 1031 1032 1033 1034 1035
	struct fast_reg_descriptor *desc = reg->mem_h;

	if (!reg->is_mr)
		return;

	reg->mem_h = NULL;
	reg->is_mr = 0;
S
Sagi Grimberg 已提交
1036 1037 1038
	spin_lock_bh(&ib_conn->lock);
	list_add_tail(&desc->list, &ib_conn->fastreg.pool);
	spin_unlock_bh(&ib_conn->lock);
1039 1040
}

1041
int iser_post_recvl(struct iser_conn *iser_conn)
1042 1043
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
S
Sagi Grimberg 已提交
1044
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
1045 1046 1047
	struct ib_sge	  sge;
	int ib_ret;

1048
	sge.addr   = iser_conn->login_resp_dma;
1049
	sge.length = ISER_RX_LOGIN_SIZE;
S
Sagi Grimberg 已提交
1050
	sge.lkey   = ib_conn->device->mr->lkey;
1051

1052
	rx_wr.wr_id   = (unsigned long)iser_conn->login_resp_buf;
1053 1054 1055 1056
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

S
Sagi Grimberg 已提交
1057 1058
	ib_conn->post_recv_buf_count++;
	ib_ret	= ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
1059 1060
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
S
Sagi Grimberg 已提交
1061
		ib_conn->post_recv_buf_count--;
1062 1063 1064 1065
	}
	return ib_ret;
}

1066
int iser_post_recvm(struct iser_conn *iser_conn, int count)
1067 1068 1069
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
S
Sagi Grimberg 已提交
1070
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
1071
	unsigned int my_rx_head = iser_conn->rx_desc_head;
1072 1073
	struct iser_rx_desc *rx_desc;

S
Sagi Grimberg 已提交
1074
	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
1075
		rx_desc		= &iser_conn->rx_descs[my_rx_head];
1076 1077 1078 1079
		rx_wr->wr_id	= (unsigned long)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
1080
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
1081 1082 1083 1084 1085
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

S
Sagi Grimberg 已提交
1086 1087
	ib_conn->post_recv_buf_count += count;
	ib_ret	= ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
1088 1089
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
S
Sagi Grimberg 已提交
1090
		ib_conn->post_recv_buf_count -= count;
1091
	} else
1092
		iser_conn->rx_desc_head = my_rx_head;
1093 1094 1095 1096
	return ib_ret;
}


1097 1098 1099 1100 1101
/**
 * iser_start_send - Initiate a Send DTO operation
 *
 * returns 0 on success, -1 on failure
 */
S
Sagi Grimberg 已提交
1102
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
1103
{
1104
	int		  ib_ret;
1105 1106
	struct ib_send_wr send_wr, *send_wr_failed;

S
Sagi Grimberg 已提交
1107
	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
1108 1109
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);
1110 1111 1112

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (unsigned long)tx_desc;
1113 1114
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
1115
	send_wr.opcode	   = IB_WR_SEND;
1116
	send_wr.send_flags = IB_SEND_SIGNALED;
1117

S
Sagi Grimberg 已提交
1118
	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
1119
	if (ib_ret)
1120
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);
1121

1122
	return ib_ret;
1123 1124
}

1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147
/**
 * is_iser_tx_desc - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @iser_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwize it is a TX.
 */
static inline bool
is_iser_tx_desc(struct iser_conn *iser_conn, void *wr_id)
{
	void *start = iser_conn->rx_descs;
	int len = iser_conn->num_rx_descs * sizeof(*iser_conn->rx_descs);

	if (wr_id >= start && wr_id < start + len)
		return false;

	return true;
}

1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158
/**
 * iser_handle_comp_error() - Handle error completion
 * @ib_conn:   connection RDMA resources
 * @wc:        work completion
 *
 * Notes: We may handle a FLUSH error completion and in this case
 *        we only cleanup in case TX type was DATAOUT. For non-FLUSH
 *        error completion we should also notify iscsi layer that
 *        connection is failed (in case we passed bind stage).
 */
static void
1159
iser_handle_comp_error(struct ib_conn *ib_conn,
1160
		       struct ib_wc *wc)
1161
{
1162 1163 1164 1165 1166 1167 1168 1169
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);

	if (wc->status != IB_WC_WR_FLUSH_ERR)
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

1170 1171 1172 1173 1174 1175 1176 1177
	if (is_iser_tx_desc(iser_conn, (void *)wc->wr_id)) {
		struct iser_tx_desc *desc = (struct iser_tx_desc *)wc->wr_id;

		if (desc->type == ISCSI_TX_DATAOUT)
			kmem_cache_free(ig.desc_cache, desc);
	} else {
		ib_conn->post_recv_buf_count--;
	}
1178 1179
}

1180 1181 1182 1183 1184 1185 1186 1187 1188
/**
 * iser_handle_wc - handle a single work completion
 * @wc: work completion
 *
 * Soft-IRQ context, work completion can be either
 * SEND or RECV, and can turn out successful or
 * with error (or flush error).
 */
static void iser_handle_wc(struct ib_wc *wc)
1189
{
S
Sagi Grimberg 已提交
1190
	struct ib_conn *ib_conn;
1191 1192
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;
1193

1194 1195 1196 1197 1198 1199 1200 1201 1202 1203
	ib_conn = wc->qp->qp_context;
	if (wc->status == IB_WC_SUCCESS) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)wc->wr_id;
			iser_rcv_completion(rx_desc, wc->byte_len,
					    ib_conn);
		} else
		if (wc->opcode == IB_WC_SEND) {
			tx_desc = (struct iser_tx_desc *)wc->wr_id;
			iser_snd_completion(tx_desc, ib_conn);
1204
		} else {
1205
			iser_err("Unknown wc opcode %d\n", wc->opcode);
1206
		}
1207 1208 1209 1210 1211 1212 1213
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iser_err("wr id %llx status %d vend_err %x\n",
				 wc->wr_id, wc->status, wc->vendor_err);
		else
			iser_dbg("flush error: wr id %llx\n", wc->wr_id);

1214 1215
		if (wc->wr_id != ISER_FASTREG_LI_WRID &&
		    wc->wr_id != ISER_BEACON_WRID)
1216 1217 1218
			iser_handle_comp_error(ib_conn, wc);

		/* complete in case all flush errors were consumed */
1219
		if (wc->wr_id == ISER_BEACON_WRID)
1220
			complete(&ib_conn->flush_comp);
1221 1222 1223
	}
}

1224 1225 1226 1227 1228 1229 1230
/**
 * iser_cq_tasklet_fn - iSER completion polling loop
 * @data: iSER completion context
 *
 * Soft-IRQ context, polling connection CQ until
 * either CQ was empty or we exausted polling budget
 */
1231 1232
static void iser_cq_tasklet_fn(unsigned long data)
{
1233
	struct iser_comp *comp = (struct iser_comp *)data;
1234
	struct ib_cq *cq = comp->cq;
1235
	struct ib_wc wc;
1236
	int completed = 0;
1237 1238

	while (ib_poll_cq(cq, 1, &wc) == 1) {
1239 1240 1241
		iser_handle_wc(&wc);

		if (++completed >= iser_cq_poll_limit)
1242
			break;
1243
	}
1244 1245 1246 1247 1248

	/*
	 * It is assumed here that arming CQ only once its empty
	 * would not cause interrupts to be missed.
	 */
1249
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1250

1251
	iser_dbg("got %d completions\n", completed);
1252 1253 1254 1255
}

static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
1256
	struct iser_comp *comp = cq_context;
1257

1258
	tasklet_schedule(&comp->tasklet);
1259
}
1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284

u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	struct fast_reg_descriptor *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) {
		desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;

			do_div(sector_off, sector_size + 8);
			*sector = scsi_get_lba(iser_task->sc) + sector_off;

1285
			pr_err("PI error found type %d at sector %llx "
1286
			       "expected %x vs actual %x\n",
1287 1288
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
err:
	/* Not alot we can do here, return ambiguous guard error */
	return 0x1;
}