/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
				 ISCSI_ISER_MAX_CONN)

static int iser_cq_poll_limit = 512;

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("cq event %s (%d)\n",
		 ib_event_msg(cause->event), cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("qp event %s (%d)\n",
		 ib_event_msg(cause->event), cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	iser_err("async event %s (%d) on device %s port %d\n",
		 ib_event_msg(event->event), event->event,
		 event->device->name, event->element.port_num);
}

/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct ib_device_attr *dev_attr = &device->dev_attr;
	int ret, i, max_cqe;

	ret = ib_query_device(device->ib_device, dev_attr);
	if (ret) {
		pr_warn("Query device failed for %s\n", device->ib_device->name);
		return ret;
	}

	/* Assign function handles  - based on FMR support */
	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
		device->iser_free_rdma_reg_res = iser_free_fmr_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
	} else
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool;
		device->iser_free_rdma_reg_res = iser_free_fastreg_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg;
	} else {
		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
		return -1;
	}

	device->comps_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);

	device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
				GFP_KERNEL);
	if (!device->comps)
		goto comps_err;

	max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);

	iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
		  device->comps_used, device->ib_device->name,
		  device->ib_device->num_comp_vectors, max_cqe);

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	for (i = 0; i < device->comps_used; i++) {
		struct ib_cq_init_attr cq_attr = {};
		struct iser_comp *comp = &device->comps[i];

		comp->device = device;
		cq_attr.cqe = max_cqe;
		cq_attr.comp_vector = i;
		comp->cq = ib_create_cq(device->ib_device,
					iser_cq_callback,
					iser_cq_event_callback,
					(void *)comp,
					&cq_attr);
		if (IS_ERR(comp->cq)) {
			comp->cq = NULL;
			goto cq_err;
		}

		if (ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP))
			goto cq_err;

		tasklet_init(&comp->tasklet, iser_cq_tasklet_fn,
			     (unsigned long)comp);
	}

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
				iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	for (i = 0; i < device->comps_used; i++)
		tasklet_kill(&device->comps[i].tasklet);
cq_err:
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		if (comp->cq)
			ib_destroy_cq(comp->cq);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	kfree(device->comps);
comps_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;
	BUG_ON(device->mr == NULL);

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		tasklet_kill(&comp->tasklet);
		ib_destroy_cq(comp->cq);
		comp->cq = NULL;
	}

	(void)ib_unregister_event_handler(&device->event_handler);
	(void)ib_dereg_mr(device->mr);
	(void)ib_dealloc_pd(device->pd);

	kfree(device->comps);
	device->comps = NULL;

	device->mr = NULL;
	device->pd = NULL;
}

/**
 * iser_create_fmr_pool - Creates FMR pool and page_vector
 *
 * returns 0 on success, or errno code on failure
 */
int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct ib_fmr_pool_param params;
	int ret = -ENOMEM;

	ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
					(sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
					GFP_KERNEL);
	if (!ib_conn->fmr.page_vec)
		return ret;

	ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);

	params.page_shift        = SHIFT_4K;
	/* when the first/last SG element are not start/end *
	 * page aligned, the map would be of N+1 pages      */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50%  */
	params.pool_size	 = cmds_max * 2;
	params.dirty_watermark	 = cmds_max;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
	if (!IS_ERR(ib_conn->fmr.pool))
		return 0;

	/* no FMR => no need for page_vec */
	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;

	ret = PTR_ERR(ib_conn->fmr.pool);
	ib_conn->fmr.pool = NULL;
	if (ret != -ENOSYS) {
		iser_err("FMR allocation failed, err %d\n", ret);
		return ret;
	} else {
		iser_warn("FMRs are not supported, using unaligned mode\n");
		return 0;
	}
}

/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, ib_conn->fmr.pool);

	if (ib_conn->fmr.pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr.pool);

	ib_conn->fmr.pool = NULL;

	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;
}

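/**
 * iser_alloc_reg_res - allocates a fast registration MR and its
 * page list for a single registration descriptor.
 *
 * returns 0 on success, or errno code on failure
 */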
static int
iser_alloc_reg_res(struct ib_device *ib_device, struct ib_pd *pd,
		   struct iser_reg_resources *res)
{
	int ret;

	res->frpl = ib_alloc_fast_reg_page_list(ib_device,
						ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(res->frpl)) {
		ret = PTR_ERR(res->frpl);
		iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
			 ret);
		return PTR_ERR(res->frpl);
	}

	res->mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
			      ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(res->mr)) {
		ret = PTR_ERR(res->mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		goto fast_reg_mr_failure;
	}
	res->mr_valid = 1;

	return 0;

fast_reg_mr_failure:
	ib_free_fast_reg_page_list(res->frpl);

	return ret;
}

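/**
 * iser_free_reg_res - releases the MR and page list of a
 * registration descriptor.
 */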
static void
iser_free_reg_res(struct iser_reg_resources *rsc)
{
	ib_dereg_mr(rsc->mr);
	ib_free_fast_reg_page_list(rsc->frpl);
}

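/**
 * iser_alloc_pi_ctx - allocates the T10-PI context of a descriptor:
 * registration resources plus a signature MR.
 *
 * returns 0 on success, or errno code on failure
 */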
static int
iser_alloc_pi_ctx(struct ib_device *ib_device, struct ib_pd *pd,
		  struct fast_reg_descriptor *desc)
{
	struct iser_pi_context *pi_ctx = NULL;
	int ret;

	desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!desc->pi_ctx)
		return -ENOMEM;

	pi_ctx = desc->pi_ctx;

	ret = iser_alloc_reg_res(ib_device, pd, &pi_ctx->rsc);
	if (ret) {
		iser_err("failed to allocate reg_resources\n");
		goto alloc_reg_res_err;
	}

	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
	if (IS_ERR(pi_ctx->sig_mr)) {
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto sig_mr_failure;
	}
	pi_ctx->sig_mr_valid = 1;
	desc->pi_ctx->sig_protected = 0;

	return 0;

sig_mr_failure:
	iser_free_reg_res(&pi_ctx->rsc);
alloc_reg_res_err:
	kfree(desc->pi_ctx);

	return ret;
}

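/**
 * iser_free_pi_ctx - releases the T10-PI context resources.
 */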
static void
iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
{
	iser_free_reg_res(&pi_ctx->rsc);
	ib_dereg_mr(pi_ctx->sig_mr);
	kfree(pi_ctx);
}

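/**
 * iser_create_fastreg_desc - initializes a single fast_reg descriptor,
 * optionally with a T10-PI context when pi_enable is set.
 *
 * returns 0 on success, or errno code on failure
 */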
static int
iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
			 bool pi_enable, struct fast_reg_descriptor *desc)
{
	int ret;

	ret = iser_alloc_reg_res(ib_device, pd, &desc->rsc);
	if (ret) {
		iser_err("failed to allocate reg_resources\n");
		return ret;
	}

	if (pi_enable) {
		ret = iser_alloc_pi_ctx(ib_device, pd, desc);
		if (ret)
			goto pi_ctx_alloc_failure;
	}

	return 0;

pi_ctx_alloc_failure:
	iser_free_reg_res(&desc->rsc);

	return ret;
}

/**
 * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 * returns 0 on success, or errno code on failure
 */
int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct fast_reg_descriptor *desc;
	int i, ret;

	INIT_LIST_HEAD(&ib_conn->fastreg.pool);
	ib_conn->fastreg.pool_size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			iser_err("Failed to allocate a new fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = iser_create_fastreg_desc(device->ib_device, device->pd,
					       ib_conn->pi_support, desc);
		if (ret) {
			iser_err("Failed to create fastreg descriptor err=%d\n",
				 ret);
			kfree(desc);
			goto err;
		}

		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
		ib_conn->fastreg.pool_size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}

/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct fast_reg_descriptor *desc, *tmp;
	int i = 0;

	if (list_empty(&ib_conn->fastreg.pool))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
		list_del(&desc->list);
		iser_free_reg_res(&desc->rsc);
		if (desc->pi_ctx)
			iser_free_pi_ctx(desc->pi_ctx);
		kfree(desc);
		++i;
	}

	if (i < ib_conn->fastreg.pool_size)
		iser_warn("pool still has %d regions registered\n",
			  ib_conn->fastreg.pool_size - i);
}

/**
 * iser_create_ib_conn_res - creates the Queue-Pair (QP)
 *
 * returns 0 on success, errno code on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);
	struct iser_device	*device;
	struct ib_device_attr *dev_attr;
	struct ib_qp_init_attr	init_attr;
	int			ret = -ENOMEM;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;
	dev_attr = &device->dev_attr;

	memset(&init_attr, 0, sizeof init_attr);

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->comps_used; index++) {
		if (device->comps[index].active_qps <
		    device->comps[min_index].active_qps)
			min_index = index;
	}
	ib_conn->comp = &device->comps[min_index];
	ib_conn->comp->active_qps++;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= ib_conn->comp->cq;
	init_attr.recv_cq	= ib_conn->comp->cq;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;
	if (ib_conn->pi_support) {
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
		iser_conn->max_cmds =
			ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
	} else {
		if (dev_attr->max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
			init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS + 1;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
		} else {
			init_attr.cap.max_send_wr = dev_attr->max_qp_wr;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(dev_attr->max_qp_wr);
			iser_dbg("device %s supports max_send_wr %d\n",
				 device->ib_device->name, dev_attr->max_qp_wr);
		}
	}

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;

out_err:
	mutex_lock(&ig.connlist_mutex);
	ib_conn->comp->active_qps--;
	mutex_unlock(&ig.connlist_mutex);
	iser_err("unable to alloc mem or create resource, err %d\n", ret);

	return ret;
}

/**
 * based on the resolved device node GUID, see if there is already an
 * allocated device for this node. If there isn't, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign this iser device to the IB device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

/**
 * Called with state mutex held
 **/
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
{
	int ret;

	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;

	return ret;
}

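/**
 * iser_release_work - deferred connection release work. Waits for
 * connection stop and IB resources cleanup to complete, moves the
 * connection to DOWN state and releases it.
 */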
void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;

	iser_conn = container_of(work, struct iser_conn, release_work);

	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resources cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);

	iser_conn_release(iser_conn);
}

/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy: indicator if we need to try to release the
 *     iser device and memory regions pool (only iscsi
 *     shutdown and DEVICE_REMOVAL will use this).
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
				  bool destroy)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	if (ib_conn->qp != NULL) {
		ib_conn->comp->active_qps--;
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}

	if (destroy) {
		if (iser_conn->rx_descs)
			iser_free_rx_descriptors(iser_conn);

		if (device != NULL) {
			iser_device_try_release(device);
			ib_conn->device = NULL;
		}
	}
}

/**
 * Frees all conn objects and deallocs conn descriptor
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	mutex_lock(&iser_conn->state_mutex);
	/* In case we end up here without ep_disconnect being invoked. */
	if (iser_conn->state != ISER_CONN_DOWN) {
		iser_warn("iser conn %p state %d, expected state down.\n",
			  iser_conn, iser_conn->state);
		iscsi_destroy_endpoint(iser_conn->ep);
		iser_conn->state = ISER_CONN_DOWN;
	}
	/*
	 * In case we never got to bind stage, we still need to
	 * release IB resources (which is safe to call more than once).
	 */
	iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}

	kfree(iser_conn);
}

/**
 * triggers start of the disconnect procedures and waits for them to be done
 * Called with state mutex held
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_send_wr *bad_wr;
	int err = 0;

	/* terminate the iser conn only if the conn state is UP */
	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				       ISER_CONN_TERMINATING))
		return 0;

	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);

	/*
	 * In case we didn't already clean up the cma_id (peer initiated
	 * a disconnection), we need to cause the CMA to change the QP
	 * state to ERROR.
	 */
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);

		/* post an indication that all flush errors were consumed */
		err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr);
		if (err) {
			iser_err("conn %p failed to post beacon", ib_conn);
			return 1;
		}

		wait_for_completion(&ib_conn->flush_comp);
	}

	return 1;
}

/**
 * Called with state mutex held
 **/
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn->state = ISER_CONN_TERMINATING;
}

/**
 * Called with state mutex held
 **/
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn   *iser_conn;
	struct ib_conn   *ib_conn;
	int    ret;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->dev_attr.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}

/**
 * Called with state mutex held
 **/
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int    ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
			ISER_SEND_W_INV_NOT_SUPPORTED);
	conn_param.private_data		= (void *)&req_hdr;
	conn_param.private_data_len	= sizeof(struct iser_cm_hdr);

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}

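/**
 * Called with state mutex held
 **/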
static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);

	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
}

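/**
 * Called with state mutex held
 *
 * Terminates the connection and, if the iscsi connection is bound,
 * notifies the iscsi layer about the connection failure.
 **/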
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
}

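/**
 * Called with state mutex held
 *
 * Makes sure disconnect handling was done, releases the connection
 * IB resources and signals ib_completion so the release work can proceed.
 **/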
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	/*
	 * We are not guaranteed that we visited disconnected_handler
	 * by now, call it here to be safe that we handle CM drep
	 * and flush errors.
	 */
	iser_disconnected_handler(cma_id);
	iser_free_ib_conn_res(iser_conn, destroy);
	complete(&iser_conn->ib_completion);
};

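/**
 * iser_cma_handler - dispatches RDMA CM events to the handlers above.
 * Returns non-zero only for DEVICE_REMOVAL when the connection is not
 * already DOWN, in which case the cma_id is no longer referenced.
 */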
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_info("%s (%d): status %d conn %p id %p\n",
		  rdma_event_msg(event->event), event->event,
		  event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * we *must* destroy the device as we cannot rely
		 * on iscsid to be around to initiate error handling.
		 * also if we are not in state DOWN implicitly destroy
		 * the cma_id.
		 */
		iser_cleanup_handler(cma_id, true);
		if (iser_conn->state != ISER_CONN_DOWN) {
			iser_conn->ib_conn.cma_id = NULL;
			ret = 1;
		}
		break;
	default:
		iser_err("Unexpected RDMA CM event: %s (%d)\n",
			 rdma_event_msg(event->event), event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);

	return ret;
}

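/**
 * iser_conn_init - initializes the iser connection struct:
 * state, completions, connection list entry and locks.
 */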
void iser_conn_init(struct iser_conn *iser_conn)
{
	iser_conn->state = ISER_CONN_INIT;
	iser_conn->ib_conn.post_recv_buf_count = 0;
	init_completion(&iser_conn->ib_conn.flush_comp);
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->ib_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	spin_lock_init(&iser_conn->ib_conn.lock);
	mutex_init(&iser_conn->state_mutex);
}

/**
 * starts the process of connecting to the target
 * sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn   *iser_conn,
		 struct sockaddr    *src_addr,
		 struct sockaddr    *dst_addr,
		 int                 non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	mutex_lock(&iser_conn->state_mutex);

	sprintf(iser_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", iser_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_conn->state = ISER_CONN_PENDING;

	ib_conn->beacon.wr_id = ISER_BEACON_WRID;
	ib_conn->beacon.opcode = IB_WR_SEND;

	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);

		if (iser_conn->state != ISER_CONN_UP) {
			err =  -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);

	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}

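/**
 * iser_post_recvl - posts a single receive buffer for the login response.
 *
 * returns 0 on success, or the ib_post_recv() error code on failure
 */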
int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_sge	  sge;
	int ib_ret;

	sge.addr   = iser_conn->login_resp_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey   = ib_conn->device->mr->lkey;

	rx_wr.wr_id   = (uintptr_t)iser_conn->login_resp_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret	= ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}

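/**
 * iser_post_recvm - posts a chain of @count receive buffers taken
 * from the connection rx descriptors ring.
 *
 * returns 0 on success, or the ib_post_recv() error code on failure
 */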
int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	unsigned int my_rx_head = iser_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &iser_conn->rx_descs[my_rx_head];
		rx_wr->wr_id	= (uintptr_t)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret	= ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		iser_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}


/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, errno code on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		   bool signal)
{
	int		  ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (uintptr_t)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = signal ? IB_SEND_SIGNALED : 0;

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret)
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);

	return ib_ret;
}

/**
 * is_iser_tx_desc - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @iser_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_iser_tx_desc(struct iser_conn *iser_conn, void *wr_id)
{
	void *start = iser_conn->rx_descs;
	int len = iser_conn->num_rx_descs * sizeof(*iser_conn->rx_descs);

	if (wr_id >= start && wr_id < start + len)
		return false;

	return true;
}

/**
 * iser_handle_comp_error() - Handle error completion
 * @ib_conn:   connection RDMA resources
 * @wc:        work completion
 *
 * Notes: We may handle a FLUSH error completion and in this case
 *        we only cleanup in case TX type was DATAOUT. For non-FLUSH
 *        error completion we should also notify iscsi layer that
 *        connection is failed (in case we passed bind stage).
 */
static void
iser_handle_comp_error(struct ib_conn *ib_conn,
		       struct ib_wc *wc)
{
	void *wr_id = (void *)(uintptr_t)wc->wr_id;
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);

	if (wc->status != IB_WC_WR_FLUSH_ERR)
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

	if (wc->wr_id == ISER_FASTREG_LI_WRID)
		return;

	if (is_iser_tx_desc(iser_conn, wr_id)) {
		struct iser_tx_desc *desc = wr_id;

		if (desc->type == ISCSI_TX_DATAOUT)
			kmem_cache_free(ig.desc_cache, desc);
	} else {
		ib_conn->post_recv_buf_count--;
	}
}

/**
 * iser_handle_wc - handle a single work completion
 * @wc: work completion
 *
 * Soft-IRQ context, work completion can be either
 * SEND or RECV, and can turn out successful or
 * with error (or flush error).
 */
static void iser_handle_wc(struct ib_wc *wc)
{
	struct ib_conn *ib_conn;
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;

	ib_conn = wc->qp->qp_context;
	if (likely(wc->status == IB_WC_SUCCESS)) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
			iser_rcv_completion(rx_desc, wc->byte_len,
					    ib_conn);
		} else
		if (wc->opcode == IB_WC_SEND) {
			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
			iser_snd_completion(tx_desc, ib_conn);
		} else {
			iser_err("Unknown wc opcode %d\n", wc->opcode);
		}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iser_err("%s (%d): wr id %llx vend_err %x\n",
				 ib_wc_status_msg(wc->status), wc->status,
				 wc->wr_id, wc->vendor_err);
		else
			iser_dbg("%s (%d): wr id %llx\n",
				 ib_wc_status_msg(wc->status), wc->status,
				 wc->wr_id);

		if (wc->wr_id == ISER_BEACON_WRID)
			/* all flush errors were consumed */
			complete(&ib_conn->flush_comp);
		else
			iser_handle_comp_error(ib_conn, wc);
	}
}

/**
 * iser_cq_tasklet_fn - iSER completion polling loop
 * @data: iSER completion context
 *
 * Soft-IRQ context, polling connection CQ until
 * either CQ was empty or we exhausted polling budget
 */
static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_comp *comp = (struct iser_comp *)data;
	struct ib_cq *cq = comp->cq;
	struct ib_wc *const wcs = comp->wcs;
	int i, n, completed = 0;

	while ((n = ib_poll_cq(cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
		for (i = 0; i < n; i++)
			iser_handle_wc(&wcs[i]);

		completed += n;
		if (completed >= iser_cq_poll_limit)
			break;
	}

	/*
	 * It is assumed here that arming CQ only once its empty
	 * would not cause interrupts to be missed.
	 */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	iser_dbg("got %d completions\n", completed);
}

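/**
 * iser_cq_callback - CQ completion notification handler, schedules
 * the polling tasklet of the completion context.
 */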
static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_comp *comp = cq_context;

	tasklet_schedule(&comp->tasklet);
}

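/**
 * iser_check_task_pi_status - checks the signature MR status of a
 * protected task. On a detected T10-PI error, reports the failing
 * sector in @sector and returns a guard/reference-tag/application-tag
 * error code; returns 0 otherwise.
 */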
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	struct fast_reg_descriptor *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->pi_ctx->sig_protected) {
		desc->pi_ctx->sig_protected = 0;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;

			do_div(sector_off, sector_size + 8);
			*sector = scsi_get_lba(iser_task->sc) + sector_off;

			pr_err("PI error found type %d at sector %llx "
			       "expected %x vs actual %x\n",
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}