/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_CQ_LEN	(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
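/* Note: the CQ lengths above assume the worst case, where every connection
 * sharing a completion vector has all of its recv/send work requests
 * outstanding at the same time. */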

static int iser_cq_poll_limit = 512;

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static int iser_drain_tx_cq(struct iser_comp *comp);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got cq event %d \n", cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got qp event %d\n",cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	iser_err("async event %d on device %s port %d\n", event->event,
		event->device->name, event->element.port_num);
}

/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct ib_device_attr *dev_attr = &device->dev_attr;
	int ret, i;

	ret = ib_query_device(device->ib_device, dev_attr);
	if (ret) {
		pr_warn("Query device failed for %s\n", device->ib_device->name);
		return ret;
	}

	/* Assign function handles - based on FMR support */
	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
		device->iser_free_rdma_reg_res = iser_free_fmr_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
	} else
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool;
		device->iser_free_rdma_reg_res = iser_free_fastreg_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg;
	} else {
		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
		return -1;
	}

	device->comps_used = min(ISER_MAX_CQ,
				 device->ib_device->num_comp_vectors);
	iser_info("using %d CQs, device %s supports %d vectors\n",
		  device->comps_used, device->ib_device->name,
		  device->ib_device->num_comp_vectors);

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		comp->device = device;
		comp->rx_cq = ib_create_cq(device->ib_device,
					   iser_cq_callback,
					   iser_cq_event_callback,
					   (void *)comp,
					   ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(comp->rx_cq)) {
			comp->rx_cq = NULL;
			goto cq_err;
		}

		comp->tx_cq = ib_create_cq(device->ib_device, NULL,
					   iser_cq_event_callback,
					   (void *)comp,
					   ISER_MAX_TX_CQ_LEN, i);
		if (IS_ERR(comp->tx_cq)) {
			comp->tx_cq = NULL;
			goto cq_err;
		}

		if (ib_req_notify_cq(comp->rx_cq, IB_CQ_NEXT_COMP))
			goto cq_err;

		tasklet_init(&comp->tasklet, iser_cq_tasklet_fn,
			     (unsigned long)comp);
	}

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
				iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	for (i = 0; i < device->comps_used; i++)
		tasklet_kill(&device->comps[i].tasklet);
cq_err:
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		if (comp->tx_cq)
			ib_destroy_cq(comp->tx_cq);
		if (comp->rx_cq)
			ib_destroy_cq(comp->rx_cq);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;

	BUG_ON(device->mr == NULL);

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		tasklet_kill(&comp->tasklet);
		ib_destroy_cq(comp->tx_cq);
		ib_destroy_cq(comp->rx_cq);
		comp->tx_cq = NULL;
		comp->rx_cq = NULL;
	}

	(void)ib_unregister_event_handler(&device->event_handler);
	(void)ib_dereg_mr(device->mr);
	(void)ib_dealloc_pd(device->pd);

	device->mr = NULL;
	device->pd = NULL;
}

/**
 * iser_create_fmr_pool - Creates FMR pool and page_vector
 *
 * returns 0 on success, or errno code on failure
 */
int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct ib_fmr_pool_param params;
	int ret = -ENOMEM;

	ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
					(sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
					GFP_KERNEL);
	if (!ib_conn->fmr.page_vec)
		return ret;

	ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);

	params.page_shift        = SHIFT_4K;
	/* when the first/last SG element is not start/end *
	 * page aligned, the map would be of N+1 pages     */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50%  */
	params.pool_size	 = cmds_max * 2;
	params.dirty_watermark	 = cmds_max;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
	if (!IS_ERR(ib_conn->fmr.pool))
		return 0;

	/* no FMR => no need for page_vec */
	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;

	ret = PTR_ERR(ib_conn->fmr.pool);
	ib_conn->fmr.pool = NULL;
	if (ret != -ENOSYS) {
		iser_err("FMR allocation failed, err %d\n", ret);
		return ret;
	} else {
		iser_warn("FMRs are not supported, using unaligned mode\n");
		return 0;
	}
}

/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, ib_conn->fmr.pool);

	if (ib_conn->fmr.pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr.pool);

	ib_conn->fmr.pool = NULL;

	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;
}

static int
iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
			 bool pi_enable, struct fast_reg_descriptor *desc)
{
	int ret;

	desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
						      ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_frpl)) {
		ret = PTR_ERR(desc->data_frpl);
		iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
			 ret);
		return PTR_ERR(desc->data_frpl);
	}

	desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_mr)) {
		ret = PTR_ERR(desc->data_mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		goto fast_reg_mr_failure;
	}
	desc->reg_indicators |= ISER_DATA_KEY_VALID;

	if (pi_enable) {
		struct ib_mr_init_attr mr_init_attr = {0};
		struct iser_pi_context *pi_ctx = NULL;

		desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
		if (!desc->pi_ctx) {
			iser_err("Failed to allocate pi context\n");
			ret = -ENOMEM;
			goto pi_ctx_alloc_failure;
		}
		pi_ctx = desc->pi_ctx;

		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
						    ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(pi_ctx->prot_frpl)) {
			ret = PTR_ERR(pi_ctx->prot_frpl);
			iser_err("Failed to allocate prot frpl ret=%d\n",
				 ret);
			goto prot_frpl_failure;
		}

		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
						ISCSI_ISER_SG_TABLESIZE + 1);
		if (IS_ERR(pi_ctx->prot_mr)) {
			ret = PTR_ERR(pi_ctx->prot_mr);
			iser_err("Failed to allocate prot frmr ret=%d\n",
				 ret);
			goto prot_mr_failure;
		}
		desc->reg_indicators |= ISER_PROT_KEY_VALID;

		mr_init_attr.max_reg_descriptors = 2;
		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
		if (IS_ERR(pi_ctx->sig_mr)) {
			ret = PTR_ERR(pi_ctx->sig_mr);
			iser_err("Failed to allocate signature enabled mr err=%d\n",
				 ret);
			goto sig_mr_failure;
		}
		desc->reg_indicators |= ISER_SIG_KEY_VALID;
	}
	desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;

	iser_dbg("Create fr_desc %p page_list %p\n",
		 desc, desc->data_frpl->page_list);

	return 0;
sig_mr_failure:
	ib_dereg_mr(desc->pi_ctx->prot_mr);
prot_mr_failure:
	ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
prot_frpl_failure:
	kfree(desc->pi_ctx);
pi_ctx_alloc_failure:
	ib_dereg_mr(desc->data_mr);
fast_reg_mr_failure:
	ib_free_fast_reg_page_list(desc->data_frpl);

	return ret;
}

/**
 * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 * returns 0 on success, or errno code on failure
 */
int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct fast_reg_descriptor *desc;
	int i, ret;

	INIT_LIST_HEAD(&ib_conn->fastreg.pool);
	ib_conn->fastreg.pool_size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			iser_err("Failed to allocate a new fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = iser_create_fastreg_desc(device->ib_device, device->pd,
					       ib_conn->pi_support, desc);
		if (ret) {
			iser_err("Failed to create fastreg descriptor err=%d\n",
				 ret);
			kfree(desc);
			goto err;
		}

		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
		ib_conn->fastreg.pool_size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}

/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct fast_reg_descriptor *desc, *tmp;
	int i = 0;

	if (list_empty(&ib_conn->fastreg.pool))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
		list_del(&desc->list);
		ib_free_fast_reg_page_list(desc->data_frpl);
		ib_dereg_mr(desc->data_mr);
		if (desc->pi_ctx) {
			ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
			ib_dereg_mr(desc->pi_ctx->prot_mr);
			ib_destroy_mr(desc->pi_ctx->sig_mr);
			kfree(desc->pi_ctx);
		}
		kfree(desc);
		++i;
	}

	if (i < ib_conn->fastreg.pool_size)
		iser_warn("pool still has %d regions registered\n",
			  ib_conn->fastreg.pool_size - i);
}

/**
 * iser_create_ib_conn_res - creates the Queue-Pair (QP)
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_device	*device;
	struct ib_qp_init_attr	init_attr;
	int			ret = -ENOMEM;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;

	memset(&init_attr, 0, sizeof init_attr);

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->comps_used; index++) {
		if (device->comps[index].active_qps <
		    device->comps[min_index].active_qps)
			min_index = index;
	}
	ib_conn->comp = &device->comps[min_index];
	ib_conn->comp->active_qps++;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= ib_conn->comp->tx_cq;
	init_attr.recv_cq	= ib_conn->comp->rx_cq;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;
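	/* a signature-enabled QP posts additional registration work requests
	 * per command, which presumably is why it gets the deeper
	 * ISER_QP_SIG_MAX_REQ_DTOS send queue */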
	if (ib_conn->pi_support) {
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS;
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
	} else {
		init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
	}

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;

out_err:
	iser_err("unable to alloc mem or create resource, err %d\n", ret);
	return ret;
}

/**
 * based on the resolved device node GUID, see if there is already an
 * allocated device. If not, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign the IB device to the new iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

/**
 * Called with state mutex held
 **/
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
{
	int ret;

	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;

	return ret;
}

void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;

	iser_conn = container_of(work, struct iser_conn, release_work);

	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resources cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);

	iser_conn_release(iser_conn);
}

/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy_device: indicator if we need to try to release
 *     the iser device (only iscsi shutdown and DEVICE_REMOVAL
 *     will use this).
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
				  bool destroy_device)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	iser_free_rx_descriptors(iser_conn);

	if (ib_conn->qp != NULL) {
		ib_conn->comp->active_qps--;
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}

	if (destroy_device && device != NULL) {
		iser_device_try_release(device);
		ib_conn->device = NULL;
	}
}

/**
 * Frees all conn objects and deallocs conn descriptor
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	mutex_lock(&iser_conn->state_mutex);
	if (iser_conn->state != ISER_CONN_DOWN)
		iser_warn("iser conn %p state %d, expected state down.\n",
			  iser_conn, iser_conn->state);
	/*
	 * In case we never got to bind stage, we still need to
	 * release IB resources (which is safe to call more than once).
	 */
	iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}

	kfree(iser_conn);
}

/**
 * iser_poll_for_flush_errors - Don't settle for less than all.
 * @ib_conn: iser IB connection context
 *
 * This routine is called when the QP is in error state.
 * It polls the send CQ until all flush errors are consumed and
 * returns once all posted buffers have been flushed.
 */
static void iser_poll_for_flush_errors(struct ib_conn *ib_conn)
{
	int count = 0;

	while (ib_conn->post_recv_buf_count > 0 ||
	       atomic_read(&ib_conn->post_send_buf_count) > 0) {
		msleep(100);
		if (atomic_read(&ib_conn->post_send_buf_count) > 0)
			iser_drain_tx_cq(ib_conn->comp);

		count++;
		/* Don't flood with prints */
		if (count % 30 == 0)
			iser_dbg("post_recv %d post_send %d",
				 ib_conn->post_recv_buf_count,
				 atomic_read(&ib_conn->post_send_buf_count));
	}
}

/**
 * triggers start of the disconnect procedures and waits for them to be done
 * Called with state mutex held
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	/* terminate the iser conn only if the conn state is UP */
	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				       ISER_CONN_TERMINATING))
		return 0;

	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);

	/*
	 * In case we didn't already clean up the cma_id (peer initiated
	 * a disconnection), we need to cause the CMA to change the QP
	 * state to ERROR.
	 */
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);

		iser_poll_for_flush_errors(ib_conn);
	}

	return 1;
}

/**
 * Called with state mutex held
 **/
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn->state = ISER_CONN_DOWN;
}

/**
 * Called with state mutex held
 **/
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn   *iser_conn;
	struct ib_conn   *ib_conn;
	int    ret;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->dev_attr.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}

/**
 * Called with state mutex held
 **/
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int    ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
			ISER_SEND_W_INV_NOT_SUPPORTED);
	conn_param.private_data		= (void *)&req_hdr;
	conn_param.private_data_len	= sizeof(struct iser_cm_hdr);

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}

static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);

	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
}

static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
}

static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy_device)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	/*
	 * We are not guaranteed that we visited disconnected_handler
	 * by now, call it here to be safe that we handle CM drep
	 * and flush errors.
	 */
	iser_disconnected_handler(cma_id);
	iser_free_ib_conn_res(iser_conn, destroy_device);
	complete(&iser_conn->ib_completion);
}

static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_info("event %d status %d conn %p id %p\n",
		  event->event, event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		iser_disconnected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * we *must* destroy the device as we cannot rely
		 * on iscsid to be around to initiate error handling.
		 * also implicitly destroy the cma_id.
		 */
		iser_cleanup_handler(cma_id, true);
		iser_conn->ib_conn.cma_id = NULL;
		ret = 1;
		break;
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
	default:
		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);

	return ret;
}

void iser_conn_init(struct iser_conn *iser_conn)
{
	iser_conn->state = ISER_CONN_INIT;
	iser_conn->ib_conn.post_recv_buf_count = 0;
	atomic_set(&iser_conn->ib_conn.post_send_buf_count, 0);
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->ib_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	spin_lock_init(&iser_conn->ib_conn.lock);
	mutex_init(&iser_conn->state_mutex);
}

/**
 * starts the process of connecting to the target
 * sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn   *iser_conn,
		 struct sockaddr    *src_addr,
		 struct sockaddr    *dst_addr,
		 int                 non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	mutex_lock(&iser_conn->state_mutex);

	sprintf(iser_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", iser_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_conn->state = ISER_CONN_PENDING;

	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);

		if (iser_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);

	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}

/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct ib_conn *ib_conn,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg  *mem_reg)
{
	struct ib_pool_fmr *mem;
	u64		   io_addr;
	u64		   *page_list;
	int		   status;

	page_list = page_vec->pages;
	io_addr	  = page_list[0];

	mem  = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
				    page_list,
				    page_vec->length,
				    io_addr);

	if (IS_ERR(mem)) {
		status = (int)PTR_ERR(mem);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
		return status;
	}

	mem_reg->lkey  = mem->fmr->lkey;
	mem_reg->rkey  = mem->fmr->rkey;
	mem_reg->len   = page_vec->length * SIZE_4K;
	mem_reg->va    = io_addr;
	mem_reg->is_mr = 1;
	mem_reg->mem_h = (void *)mem;

	mem_reg->va   += page_vec->offset;
	mem_reg->len   = page_vec->data_size;

	iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
		 "entry[0]: (0x%08lx,%ld)] -> "
		 "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
		 page_vec, page_vec->length,
		 (unsigned long)page_vec->pages[0],
		 (unsigned long)page_vec->data_size,
		 (unsigned int)mem_reg->lkey, mem_reg->mem_h,
		 (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
	return 0;
}

/**
 * Unregister (previously registered using FMR) memory.
 * If the memory is not FMR-registered, does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	int ret;

	if (!reg->is_mr)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	struct iser_conn *iser_conn = iser_task->iser_conn;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct fast_reg_descriptor *desc = reg->mem_h;

	if (!reg->is_mr)
		return;

	reg->mem_h = NULL;
	reg->is_mr = 0;
	spin_lock_bh(&ib_conn->lock);
	list_add_tail(&desc->list, &ib_conn->fastreg.pool);
	spin_unlock_bh(&ib_conn->lock);
}

int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_sge	  sge;
	int ib_ret;

	sge.addr   = iser_conn->login_resp_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey   = ib_conn->device->mr->lkey;

	rx_wr.wr_id   = (unsigned long)iser_conn->login_resp_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret	= ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}

int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	unsigned int my_rx_head = iser_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &iser_conn->rx_descs[my_rx_head];
		rx_wr->wr_id	= (unsigned long)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret	= ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		iser_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}


/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, -1 on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
	int		  ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (unsigned long)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

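	/* sends are posted signaled, so each successful post yields exactly
	 * one TX completion; post_send_buf_count tracks WRs still in flight */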
	atomic_inc(&ib_conn->post_send_buf_count);

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);
		atomic_dec(&ib_conn->post_send_buf_count);
	}
	return ib_ret;
}

/**
 * iser_handle_comp_error() - Handle error completion
 * @desc:      iser TX descriptor
 * @ib_conn:   connection RDMA resources
 * @wc:        work completion
 *
 * Notes: We may handle a FLUSH error completion and in this case
 *        we only cleanup in case TX type was DATAOUT. For non-FLUSH
 *        error completion we should also notify the iscsi layer that
 *        the connection failed (in case we passed the bind stage).
 */
static void
iser_handle_comp_error(struct iser_tx_desc *desc,
		       struct ib_conn *ib_conn,
		       struct ib_wc *wc)
{
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);

	if (wc->status != IB_WC_WR_FLUSH_ERR)
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

	if (desc && desc->type == ISCSI_TX_DATAOUT)
		kmem_cache_free(ig.desc_cache, desc);
}

static int iser_drain_tx_cq(struct iser_comp *comp)
{
	struct ib_cq *cq = comp->tx_cq;
	struct ib_wc  wc;
	struct iser_tx_desc *tx_desc;
	struct ib_conn *ib_conn;
	int completed_tx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		tx_desc	= (struct iser_tx_desc *) (unsigned long) wc.wr_id;
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_SEND)
				iser_snd_completion(tx_desc, ib_conn);
			else
				iser_err("expected opcode %d got %d\n",
					IB_WC_SEND, wc.opcode);
		} else {
			iser_err("tx id %llx status %d vend_err %x\n",
				 wc.wr_id, wc.status, wc.vendor_err);
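			/* fastreg/local-invalidate WRs carry the
			 * ISER_FASTREG_LI_WRID sentinel and are presumably not
			 * counted in post_send_buf_count, so skip them here */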
			if (wc.wr_id != ISER_FASTREG_LI_WRID) {
				atomic_dec(&ib_conn->post_send_buf_count);
				iser_handle_comp_error(tx_desc, ib_conn, &wc);
			}
		}
		completed_tx++;
	}
	return completed_tx;
}


static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_comp *comp = (struct iser_comp *)data;
	struct ib_cq *cq = comp->rx_cq;
	struct ib_wc wc;
	struct iser_rx_desc *desc;
	unsigned long xfer_len;
	struct ib_conn *ib_conn;
	int completed_tx, completed_rx = 0;

	/* First do tx drain, so in a case where we have rx flushes and a successful
	 * tx completion we will still go through completion error handling.
	 */
	completed_tx = iser_drain_tx_cq(comp);

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		desc	 = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
		BUG_ON(desc == NULL);
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_RECV) {
				xfer_len = (unsigned long)wc.byte_len;
				iser_rcv_completion(desc, xfer_len, ib_conn);
			} else
				iser_err("expected opcode %d got %d\n",
					IB_WC_RECV, wc.opcode);
		} else {
			if (wc.status != IB_WC_WR_FLUSH_ERR)
				iser_err("rx id %llx status %d vend_err %x\n",
					wc.wr_id, wc.status, wc.vendor_err);
			ib_conn->post_recv_buf_count--;
			iser_handle_comp_error(NULL, ib_conn, &wc);
		}
		completed_rx++;
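		/* every 64 RX completions, drain the TX CQ as well so that
		 * send completions are not starved during RX-heavy periods */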
		if (!(completed_rx & 63))
			completed_tx += iser_drain_tx_cq(comp);
		if (completed_rx >= iser_cq_poll_limit)
			break;
	}
	/* #warning "it is assumed here that arming CQ only once its empty" *
	 * " would not cause interrupts to be missed"                       */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
}

static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_comp *comp = cq_context;

	tasklet_schedule(&comp->tasklet);
}

u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	struct fast_reg_descriptor *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) {
		desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;

			do_div(sector_off, sector_size + 8);
			*sector = scsi_get_lba(iser_task->sc) + sector_off;
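			/* sector_size + 8: on the wire each data sector is
			 * followed by an 8-byte T10-PI tuple */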

			pr_err("PI error found type %d at sector %llx "
			       "expected %x vs actual %x\n",
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}