/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
				 ISCSI_ISER_MAX_CONN)
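/* Note: the CQ is sized to absorb completions for all RX and TX posts
 * across connections, plus one spare entry per connection (room for a
 * final post such as the termination beacon).
 */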

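/* Upper bound on the number of completions handled in one tasklet run */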
static int iser_cq_poll_limit = 512;

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("cq event %s (%d)\n",
		 ib_event_msg(cause->event), cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("qp event %s (%d)\n",
		 ib_event_msg(cause->event), cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	iser_err("async event %s (%d) on device %s port %d\n",
		 ib_event_msg(event->event), event->event,
		 event->device->name, event->element.port_num);
}

/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct ib_device_attr *dev_attr = &device->dev_attr;
	int ret, i, max_cqe;

	ret = ib_query_device(device->ib_device, dev_attr);
	if (ret) {
		pr_warn("Query device failed for %s\n", device->ib_device->name);
		return ret;
	}

	/* Assign function handles  - based on FMR support */
	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
		device->iser_free_rdma_reg_res = iser_free_fmr_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
	} else
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool;
		device->iser_free_rdma_reg_res = iser_free_fastreg_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg;
	} else {
		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
		return -1;
	}

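	/* one completion context per online CPU, bounded by the number of
	 * completion vectors the device exposes */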
	device->comps_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);

	device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
				GFP_KERNEL);
	if (!device->comps)
		goto comps_err;

	max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);

	iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
		  device->comps_used, device->ib_device->name,
		  device->ib_device->num_comp_vectors, max_cqe);

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	for (i = 0; i < device->comps_used; i++) {
		struct ib_cq_init_attr cq_attr = {};
		struct iser_comp *comp = &device->comps[i];

		comp->device = device;
		cq_attr.cqe = max_cqe;
		cq_attr.comp_vector = i;
		comp->cq = ib_create_cq(device->ib_device,
					iser_cq_callback,
					iser_cq_event_callback,
					(void *)comp,
					&cq_attr);
		if (IS_ERR(comp->cq)) {
			comp->cq = NULL;
			goto cq_err;
		}

		if (ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP))
			goto cq_err;

		tasklet_init(&comp->tasklet, iser_cq_tasklet_fn,
			     (unsigned long)comp);
	}

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
				iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	for (i = 0; i < device->comps_used; i++)
		tasklet_kill(&device->comps[i].tasklet);
cq_err:
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		if (comp->cq)
			ib_destroy_cq(comp->cq);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	kfree(device->comps);
comps_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;
	BUG_ON(device->mr == NULL);

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		tasklet_kill(&comp->tasklet);
		ib_destroy_cq(comp->cq);
		comp->cq = NULL;
	}

	(void)ib_unregister_event_handler(&device->event_handler);
	(void)ib_dereg_mr(device->mr);
	(void)ib_dealloc_pd(device->pd);

	kfree(device->comps);
	device->comps = NULL;

	device->mr = NULL;
	device->pd = NULL;
}

/**
 * iser_create_fmr_pool - Creates FMR pool and page_vector
 *
 * returns 0 on success, or errno code on failure
 */
int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct ib_fmr_pool_param params;
	int ret = -ENOMEM;

	ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
					(sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
					GFP_KERNEL);
	if (!ib_conn->fmr.page_vec)
		return ret;

	ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);

	params.page_shift        = SHIFT_4K;
	/* when the first/last SG element are not start/end *
	 * page aligned, the map would be of N+1 pages      */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50%  */
	params.pool_size	 = cmds_max * 2;
	params.dirty_watermark	 = cmds_max;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
	if (!IS_ERR(ib_conn->fmr.pool))
		return 0;

	/* no FMR => no need for page_vec */
	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;

	ret = PTR_ERR(ib_conn->fmr.pool);
	ib_conn->fmr.pool = NULL;
	if (ret != -ENOSYS) {
		iser_err("FMR allocation failed, err %d\n", ret);
		return ret;
	} else {
		iser_warn("FMRs are not supported, using unaligned mode\n");
		return 0;
	}
}

/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, ib_conn->fmr.pool);

	if (ib_conn->fmr.pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr.pool);

	ib_conn->fmr.pool = NULL;

	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;
}

static int
iser_alloc_pi_ctx(struct ib_device *ib_device, struct ib_pd *pd,
		  struct fast_reg_descriptor *desc)
{
	struct iser_pi_context *pi_ctx = NULL;
	struct ib_mr_init_attr mr_init_attr = {.max_reg_descriptors = 2,
					       .flags = IB_MR_SIGNATURE_EN};
	int ret = 0;

	desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!desc->pi_ctx)
		return -ENOMEM;

	pi_ctx = desc->pi_ctx;

	pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
					    ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_frpl)) {
		ret = PTR_ERR(pi_ctx->prot_frpl);
		goto prot_frpl_failure;
	}

	pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
					ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(pi_ctx->prot_mr)) {
		ret = PTR_ERR(pi_ctx->prot_mr);
		goto prot_mr_failure;
	}
	desc->reg_indicators |= ISER_PROT_KEY_VALID;

	pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
	if (IS_ERR(pi_ctx->sig_mr)) {
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto sig_mr_failure;
	}
	desc->reg_indicators |= ISER_SIG_KEY_VALID;
	desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;

	return 0;

sig_mr_failure:
	ib_dereg_mr(desc->pi_ctx->prot_mr);
prot_mr_failure:
	ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
prot_frpl_failure:
	kfree(desc->pi_ctx);

	return ret;
}

static void
iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
{
	ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
	ib_dereg_mr(pi_ctx->prot_mr);
	ib_destroy_mr(pi_ctx->sig_mr);
	kfree(pi_ctx);
}

static int
iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
			 bool pi_enable, struct fast_reg_descriptor *desc)
{
	int ret;

	desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
						      ISCSI_ISER_SG_TABLESIZE + 1);
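	/* as with the FMR pool, the extra page allows for a first/last SG
	 * element that is not page aligned */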
	if (IS_ERR(desc->data_frpl)) {
		ret = PTR_ERR(desc->data_frpl);
		iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
			 ret);
		return PTR_ERR(desc->data_frpl);
	}

	desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_mr)) {
		ret = PTR_ERR(desc->data_mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		goto fast_reg_mr_failure;
	}
	desc->reg_indicators |= ISER_DATA_KEY_VALID;

	if (pi_enable) {
		ret = iser_alloc_pi_ctx(ib_device, pd, desc);
		if (ret)
			goto pi_ctx_alloc_failure;
	}

	return 0;
pi_ctx_alloc_failure:
	ib_dereg_mr(desc->data_mr);
fast_reg_mr_failure:
	ib_free_fast_reg_page_list(desc->data_frpl);

	return ret;
}

/**
 * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 * returns 0 on success, or errno code on failure
 */
int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct fast_reg_descriptor *desc;
	int i, ret;

	INIT_LIST_HEAD(&ib_conn->fastreg.pool);
	ib_conn->fastreg.pool_size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			iser_err("Failed to allocate a new fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = iser_create_fastreg_desc(device->ib_device, device->pd,
					       ib_conn->pi_support, desc);
		if (ret) {
			iser_err("Failed to create fastreg descriptor err=%d\n",
				 ret);
			kfree(desc);
			goto err;
		}

		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
		ib_conn->fastreg.pool_size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}

/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct fast_reg_descriptor *desc, *tmp;
	int i = 0;

	if (list_empty(&ib_conn->fastreg.pool))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
		list_del(&desc->list);
		ib_free_fast_reg_page_list(desc->data_frpl);
		ib_dereg_mr(desc->data_mr);
		if (desc->pi_ctx)
			iser_free_pi_ctx(desc->pi_ctx);
		kfree(desc);
		++i;
	}

	if (i < ib_conn->fastreg.pool_size)
		iser_warn("pool still has %d regions registered\n",
			  ib_conn->fastreg.pool_size - i);
}

/**
 * iser_create_ib_conn_res - creates Queue-Pair (QP)
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);
	struct iser_device	*device;
	struct ib_device_attr *dev_attr;
	struct ib_qp_init_attr	init_attr;
	int			ret = -ENOMEM;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;
	dev_attr = &device->dev_attr;

	memset(&init_attr, 0, sizeof init_attr);

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->comps_used; index++) {
		if (device->comps[index].active_qps <
		    device->comps[min_index].active_qps)
			min_index = index;
	}
	ib_conn->comp = &device->comps[min_index];
	ib_conn->comp->active_qps++;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= ib_conn->comp->cq;
	init_attr.recv_cq	= ib_conn->comp->cq;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
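	/* sends carry at most two SGEs (iSER header + data payload);
	 * receives use a single SGE */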
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;
	if (ib_conn->pi_support) {
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
		iser_conn->max_cmds =
			ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
	} else {
		if (dev_attr->max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
			init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS + 1;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
		} else {
			init_attr.cap.max_send_wr = dev_attr->max_qp_wr;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(dev_attr->max_qp_wr);
			iser_dbg("device %s supports max_send_wr %d\n",
				 device->ib_device->name, dev_attr->max_qp_wr);
		}
	}

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;

out_err:
	mutex_lock(&ig.connlist_mutex);
	ib_conn->comp->active_qps--;
	mutex_unlock(&ig.connlist_mutex);
	iser_err("unable to alloc mem or create resource, err %d\n", ret);

	return ret;
}

/**
 * based on the resolved device node GUID see if there is an already
 * allocated device for this device. If not, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* associate this iser device with the IB device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

/**
 * Called with state mutex held
 **/
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
{
	int ret;

	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;

	return ret;
}

void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;

	iser_conn = container_of(work, struct iser_conn, release_work);

	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resources cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);

	iser_conn_release(iser_conn);
}

/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy: indicator if we need to try to release the
 *     iser device and memory regions pool (only iscsi
 *     shutdown and DEVICE_REMOVAL will use this).
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
				  bool destroy)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	if (ib_conn->qp != NULL) {
		ib_conn->comp->active_qps--;
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}

	if (destroy) {
		if (iser_conn->rx_descs)
			iser_free_rx_descriptors(iser_conn);

		if (device != NULL) {
			iser_device_try_release(device);
			ib_conn->device = NULL;
		}
	}
}

/**
 * Frees all conn objects and deallocs conn descriptor
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	mutex_lock(&iser_conn->state_mutex);
	/* In case we end up here without ep_disconnect being invoked. */
	if (iser_conn->state != ISER_CONN_DOWN) {
		iser_warn("iser conn %p state %d, expected state down.\n",
			  iser_conn, iser_conn->state);
		iscsi_destroy_endpoint(iser_conn->ep);
		iser_conn->state = ISER_CONN_DOWN;
	}
	/*
	 * In case we never got to bind stage, we still need to
	 * release IB resources (which is safe to call more than once).
	 */
	iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}

	kfree(iser_conn);
}

/**
 * triggers start of the disconnect procedures and waits for them to be done
 * Called with state mutex held
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_send_wr *bad_wr;
	int err = 0;

	/* terminate the iser conn only if the conn state is UP */
	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				       ISER_CONN_TERMINATING))
		return 0;

	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);

	/*
	 * In case we didn't already clean up the cma_id (peer initiated
	 * a disconnection), we need to Cause the CMA to change the QP
	 * state to ERROR.
	 */
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);

		/* post an indication that all flush errors were consumed */
		err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr);
		if (err) {
			iser_err("conn %p failed to post beacon", ib_conn);
			return 1;
		}

		wait_for_completion(&ib_conn->flush_comp);
	}

	return 1;
}

/**
 * Called with state mutex held
 **/
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn->state = ISER_CONN_TERMINATING;
}

/**
 * Called with state mutex held
 **/
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn   *iser_conn;
	struct ib_conn   *ib_conn;
	int    ret;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->dev_attr.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}

/**
 * Called with state mutex held
 **/
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int    ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
			ISER_SEND_W_INV_NOT_SUPPORTED);
	conn_param.private_data		= (void *)&req_hdr;
	conn_param.private_data_len	= sizeof(struct iser_cm_hdr);

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}

static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);

	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
}

static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
}

static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	/*
	 * We are not guaranteed that we visited disconnected_handler
	 * by now, call it here to be safe that we handle CM drep
	 * and flush errors.
	 */
	iser_disconnected_handler(cma_id);
	iser_free_ib_conn_res(iser_conn, destroy);
	complete(&iser_conn->ib_completion);
};

static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_info("%s (%d): status %d conn %p id %p\n",
		  rdma_event_msg(event->event), event->event,
		  event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * we *must* destroy the device as we cannot rely
		 * on iscsid to be around to initiate error handling.
		 * also if we are not in state DOWN implicitly destroy
		 * the cma_id.
		 */
		iser_cleanup_handler(cma_id, true);
		if (iser_conn->state != ISER_CONN_DOWN) {
			iser_conn->ib_conn.cma_id = NULL;
			ret = 1;
		}
		break;
	default:
		iser_err("Unexpected RDMA CM event: %s (%d)\n",
			 rdma_event_msg(event->event), event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);

	return ret;
}

void iser_conn_init(struct iser_conn *iser_conn)
{
	iser_conn->state = ISER_CONN_INIT;
	iser_conn->ib_conn.post_recv_buf_count = 0;
	init_completion(&iser_conn->ib_conn.flush_comp);
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->ib_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	spin_lock_init(&iser_conn->ib_conn.lock);
	mutex_init(&iser_conn->state_mutex);
}

 /**
 * starts the process of connecting to the target
 * sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn   *iser_conn,
		 struct sockaddr    *src_addr,
		 struct sockaddr    *dst_addr,
		 int                 non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	mutex_lock(&iser_conn->state_mutex);

	sprintf(iser_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", iser_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_conn->state = ISER_CONN_PENDING;

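	/* the beacon is posted at teardown; its completion indicates that all
	 * preceding flush errors have been consumed (see iser_conn_terminate) */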
	ib_conn->beacon.wr_id = ISER_BEACON_WRID;
	ib_conn->beacon.opcode = IB_WR_SEND;

	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);

		if (iser_conn->state != ISER_CONN_UP) {
			err =  -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);

	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}

int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_sge	  sge;
	int ib_ret;

	sge.addr   = iser_conn->login_resp_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey   = ib_conn->device->mr->lkey;

	rx_wr.wr_id   = (uintptr_t)iser_conn->login_resp_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret	= ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}

int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	unsigned int my_rx_head = iser_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &iser_conn->rx_descs[my_rx_head];
		rx_wr->wr_id	= (uintptr_t)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret	= ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		iser_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}


/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, -1 on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		   bool signal)
{
	int		  ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (uintptr_t)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = signal ? IB_SEND_SIGNALED : 0;
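	/* the caller controls selective signaling: only signaled sends
	 * generate a completion */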

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret)
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);

	return ib_ret;
}

/**
 * is_iser_tx_desc - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @iser_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_iser_tx_desc(struct iser_conn *iser_conn, void *wr_id)
{
	void *start = iser_conn->rx_descs;
	int len = iser_conn->num_rx_descs * sizeof(*iser_conn->rx_descs);

	if (wr_id >= start && wr_id < start + len)
		return false;

	return true;
}

/**
 * iser_handle_comp_error() - Handle error completion
 * @ib_conn:   connection RDMA resources
 * @wc:        work completion
 *
 * Notes: We may handle a FLUSH error completion and in this case
 *        we only cleanup in case TX type was DATAOUT. For non-FLUSH
 *        error completion we should also notify iscsi layer that
 *        connection is failed (in case we passed bind stage).
 */
static void
iser_handle_comp_error(struct ib_conn *ib_conn,
		       struct ib_wc *wc)
{
	void *wr_id = (void *)(uintptr_t)wc->wr_id;
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);

	if (wc->status != IB_WC_WR_FLUSH_ERR)
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

	if (wc->wr_id == ISER_FASTREG_LI_WRID)
		return;

	if (is_iser_tx_desc(iser_conn, wr_id)) {
		struct iser_tx_desc *desc = wr_id;

		if (desc->type == ISCSI_TX_DATAOUT)
			kmem_cache_free(ig.desc_cache, desc);
	} else {
		ib_conn->post_recv_buf_count--;
	}
}

/**
 * iser_handle_wc - handle a single work completion
 * @wc: work completion
 *
 * Soft-IRQ context, work completion can be either
 * SEND or RECV, and can turn out successful or
 * with error (or flush error).
 */
static void iser_handle_wc(struct ib_wc *wc)
{
	struct ib_conn *ib_conn;
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;

	ib_conn = wc->qp->qp_context;
	if (likely(wc->status == IB_WC_SUCCESS)) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
			iser_rcv_completion(rx_desc, wc->byte_len,
					    ib_conn);
		} else
		if (wc->opcode == IB_WC_SEND) {
			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
			iser_snd_completion(tx_desc, ib_conn);
		} else {
			iser_err("Unknown wc opcode %d\n", wc->opcode);
		}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iser_err("%s (%d): wr id %llx vend_err %x\n",
				 ib_wc_status_msg(wc->status), wc->status,
				 wc->wr_id, wc->vendor_err);
		else
			iser_dbg("%s (%d): wr id %llx\n",
				 ib_wc_status_msg(wc->status), wc->status,
				 wc->wr_id);

		if (wc->wr_id == ISER_BEACON_WRID)
			/* all flush errors were consumed */
			complete(&ib_conn->flush_comp);
		else
			iser_handle_comp_error(ib_conn, wc);
	}
}

/**
 * iser_cq_tasklet_fn - iSER completion polling loop
 * @data: iSER completion context
 *
 * Soft-IRQ context, polling connection CQ until
 * either the CQ is empty or we have exhausted the polling budget
 */
static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_comp *comp = (struct iser_comp *)data;
	struct ib_cq *cq = comp->cq;
	struct ib_wc *const wcs = comp->wcs;
	int i, n, completed = 0;

	while ((n = ib_poll_cq(cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
		for (i = 0; i < n; i++)
			iser_handle_wc(&wcs[i]);

		completed += n;
		if (completed >= iser_cq_poll_limit)
			break;
	}

	/*
	 * It is assumed here that arming CQ only once its empty
	 * would not cause interrupts to be missed.
	 */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	iser_dbg("got %d completions\n", completed);
}

static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_comp *comp = cq_context;

	tasklet_schedule(&comp->tasklet);
}

u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	struct fast_reg_descriptor *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) {
		desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;

			do_div(sector_off, sector_size + 8);
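			/* sig_err_offset is a byte offset in the wire domain,
			 * where each sector carries 8 bytes of appended
			 * protection information, hence sector_size + 8 */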
			*sector = scsi_get_lba(iser_task->sc) + sector_off;

			pr_err("PI error found type %d at sector %llx "
			       "expected %x vs actual %x\n",
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}