/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

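/*
 * Formatting/parsing helpers for the reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo module parameters: a negative value is stored internally and
 * shown to the user as "off"; the three values are cross-checked with
 * srp_tmo_valid() before a new value is accepted.
 */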
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

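/**
 * srp_alloc_fmr_pool() - allocate an FMR pool for an SRP target
 * @target: SRP target port.
 *
 * The pool is sized to the SCSI host queue depth (can_queue); each FMR maps
 * at most max_pages_per_mr pages of mr_page_size bytes.
 */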
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->frpl)
			ib_free_fast_reg_page_list(d->frpl);
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

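
/*
 * Create the receive/send completion queues, the queue pair and the memory
 * registration pool (FR or FMR) for an RDMA channel. New resources are set
 * up first; only after that succeeds are any previously allocated CQs, QP
 * and pools of this channel destroyed and replaced.
 */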
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	cq_attr.cqe = target->queue_size + 1;
	cq_attr.comp_vector = ch->comp_vector;
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	cq_attr.cqe = m * target->queue_size;
	cq_attr.comp_vector = ch->comp_vector;
	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg && dev->has_fr) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (!dev->use_fast_reg && dev->has_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the SCSI error handler may keep trying to perform
	 * recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

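/*
 * Build and send an SRP_LOGIN_REQ through the IB CM. The initiator and
 * target port identifiers are laid out according to the I/O class reported
 * by the target, and the Topspin/Cisco workaround rewrites the initiator
 * port ID when such a target is detected.
 */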
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &ch->path;
	req->param.alternate_path 	      = NULL;
	req->param.service_id 		      = target->service_id;
	req->param.qp_num		      = ch->qp->qp_num;
	req->param.qp_type		      = ch->qp->qp_type;
	req->param.private_data 	      = &req->priv;
	req->param.private_data_len 	      = sizeof req->priv;
	req->param.flow_control 	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn 	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = target->tl_retry_count;
	req->param.rnr_retry_count 	      = 7;
	req->param.max_cm_retries 	      = 15;

	req->priv.opcode     	= SRP_LOGIN_REQ;
	req->priv.tag        	= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

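/*
 * Transition the target to SRP_TARGET_REMOVED and, if this call performed
 * the transition, queue the actual teardown on srp_remove_wq. Returns true
 * if the state was changed by this call.
 */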
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template that did not exist
 * before this function was invoked will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			ch->connected = true;
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}

static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
				  "Queueing INV WR for rkey %#x failed (%d)\n",
				  (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);

	return 0;
}

static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	memcpy(desc->frpl->page_list, state->pages,
	       sizeof(state->pages[0]) * state->npages);

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;
	wr.wr_id = FAST_REG_WR_ID_MASK;
	wr.wr.fast_reg.iova_start = state->base_dma_addr;
	wr.wr.fast_reg.page_list = desc->frpl;
	wr.wr.fast_reg.page_list_len = state->npages;
	wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
	wr.wr.fast_reg.length = state->dma_len;
	wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_READ |
				       IB_ACCESS_REMOTE_WRITE);
	wr.wr.fast_reg.rkey = desc->mr->lkey;

	*state->next_fr++ = desc;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr, state->dma_len,
		     desc->mr->rkey);

1332
	return ib_post_send(ch->qp, &wr, &bad_wr);
1333 1334
}

1335
static int srp_finish_mapping(struct srp_map_state *state,
1336
			      struct srp_rdma_ch *ch)
1337
{
1338
	struct srp_target_port *target = ch->target;
1339 1340 1341 1342 1343
	int ret = 0;

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && !register_always)
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     target->rkey);
	else
		ret = target->srp_host->srp_dev->use_fast_reg ?
			srp_map_finish_fr(state, ch) :
			srp_map_finish_fmr(state, ch);

	if (ret == 0) {
		state->npages = 0;
		state->dma_len = 0;
	}

	return ret;
}

static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}

static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_rdma_ch *ch,
			    struct scatterlist *sg, int sg_index,
			    bool use_mr)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (!use_mr) {
		/*
		 * Once we're in direct map mode for a request, we don't
		 * go back to FMR or FR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/*
	 * Since not all RDMA HW drivers support non-zero page offsets for
	 * FMR, if we start at an offset into a page, don't merge into the
	 * current FMR mapping. Finish it out, and use the kernel's MR for
	 * this sg entry.
	 */
	if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
	    dma_len > dev->mr_max_size) {
		ret = srp_finish_mapping(state, ch);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/*
	 * If this is the first sg that will be mapped via FMR or via FR, save
	 * our position. We need to know the first unmapped entry, its index,
	 * and the first unmapped address within that entry to be able to
	 * restart mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;
		if (state->npages == dev->max_pages_per_mr || offset != 0) {
			ret = srp_finish_mapping(state, ch);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the last entry of the MR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->mr_page_size) {
		ret = srp_finish_mapping(state, ch);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}

static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
		      struct srp_request *req, struct scatterlist *scat,
		      int count)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct scatterlist *sg;
	int i;
	bool use_mr;

	state->desc	= req->indirect_desc;
	state->pages	= req->map_page;
	if (dev->use_fast_reg) {
		state->next_fr = req->fr_list;
		use_mr = !!ch->fr_pool;
	} else {
		state->next_fmr = req->fmr_list;
		use_mr = !!ch->fmr_pool;
	}

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
			/*
			 * Memory registration failed, so backtrack to the
			 * first unmapped entry and continue on without using
			 * memory registration.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state->unmapped_sg;
			i = state->unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state->unmapped_addr - dma_addr);
			dma_addr = state->unmapped_addr;
			use_mr = false;
			srp_map_desc(state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_mr && srp_finish_mapping(state, ch))
		goto backtrack;

	req->nmdesc = state->nmdesc;

	return 0;
}

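/*
 * Map the data buffer of a SCSI command into SRP data descriptors. A single
 * mapped entry results in a direct descriptor; otherwise an indirect
 * descriptor table is built in req->indirect_desc and as many entries as
 * allowed are copied into the SRP_CMD IU. Returns the length of the IU in
 * bytes, or a negative error code.
 */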
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1 && !register_always) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	srp_map_sg(&state, ch, req, scat, count);

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
						!target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(ch->send_cq, ch);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(ch->qp, &wr, &bad_wr);
}

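/*
 * Process an SRP_RSP information unit: task management responses update
 * ch->tsk_mgmt_status, while command responses are matched to their SCSI
 * command by tag, sense data and residual counts are copied back, and the
 * request slot and request limit credit are returned to the channel.
 */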
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&ch->lock, flags);

		ch->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			ch->tsk_mgmt_status = rsp->data[3];
		complete(&ch->tsk_mgmt_done);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd) {
			req = (void *)scmnd->host_scribble;
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		}
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1773

1774
		srp_free_req(ch, req, scmnd,
B
Bart Van Assche 已提交
1775 1776
			     be32_to_cpu(rsp->req_lim_delta));

1777 1778
		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
1779 1780 1781
	}
}

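/*
 * Send a response IU back to the target. Initiator responses do not consume
 * request limit credits, but the credit delta granted by the target in the
 * incoming request is applied before the transmit IU is allocated.
 */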
static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}

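/*
 * Handle an error completion. A completion for the SRP_LAST_WR_ID marker
 * work request only completes ch->done. Any other error is logged unless it
 * raced with connection teardown, the transport layer error work is
 * scheduled and the QP is marked as being in the error state.
 */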
static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
			      bool send_err, struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	if (wr_id == SRP_LAST_WR_ID) {
		complete(&ch->done);
		return;
	}

	if (ch->connected && !target->qp_in_error) {
		if (wr_id & LOCAL_INV_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "LOCAL_INV failed with status %s (%d)\n",
				     ib_wc_status_msg(wc_status), wc_status);
		} else if (wr_id & FAST_REG_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "FAST_REG_MR failed status %s (%d)\n",
				     ib_wc_status_msg(wc_status), wc_status);
		} else {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed %s status %s (%d) for iu %p\n",
				     send_err ? "send" : "receive",
				     ib_wc_status_msg(wc_status), wc_status,
				     (void *)(uintptr_t)wr_id);
		}
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}

static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			srp_handle_recv(ch, &wc);
		} else {
			srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
		}
	}
}

static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
			list_add(&iu->list, &ch->free_tx);
		} else {
			srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
		}
	}
}

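/*
 * srp_queuecommand() - queue a SCSI command for transmission to the target
 *
 * The blk-mq hardware queue number embedded in the command's unique tag
 * selects the RDMA channel and the per-queue tag indexes that channel's
 * request ring.
 */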
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1981
{
1982
	struct srp_target_port *target = host_to_target(shost);
1983
	struct srp_rport *rport = target->rport;
1984
	struct srp_rdma_ch *ch;
1985 1986 1987
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
1988
	struct ib_device *dev;
1989
	unsigned long flags;
B
Bart Van Assche 已提交
1990 1991
	u32 tag;
	u16 idx;
1992
	int len, ret;
1993 1994 1995 1996 1997 1998 1999 2000 2001 2002
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);
2003

2004 2005 2006
	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;
2007

B
Bart Van Assche 已提交
2008 2009
	WARN_ON_ONCE(scmnd->request->tag < 0);
	tag = blk_mq_unique_tag(scmnd->request);
B
Bart Van Assche 已提交
2010
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
B
Bart Van Assche 已提交
2011 2012 2013 2014
	idx = blk_mq_unique_tag_to_tag(tag);
	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
		  dev_name(&shost->shost_gendev), tag, idx,
		  target->req_ring_size);
2015 2016 2017 2018

	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);
2019

B
Bart Van Assche 已提交
2020 2021 2022 2023
	if (!iu)
		goto err;

	req = &ch->req_ring[idx];
2024
	dev = target->srp_host->srp_dev->dev;
2025
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
2026
				   DMA_TO_DEVICE);
2027

2028
	scmnd->host_scribble = (void *) req;
2029 2030 2031 2032 2033

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
B
Bart Van Assche 已提交
2034
	int_to_scsilun(scmnd->device->lun, &cmd->lun);
B
Bart Van Assche 已提交
2035
	cmd->tag    = tag;
2036 2037 2038 2039 2040
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;

2041
	len = srp_map_data(scmnd, ch, req);
2042
	if (len < 0) {
2043
		shost_printk(KERN_ERR, target->scsi_host,
2044 2045 2046 2047
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
2048
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2049 2050 2051 2052
		 * to reduce queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2053
		goto err_iu;
2054 2055
	}

2056
	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
2057
				      DMA_TO_DEVICE);
2058

2059
	if (srp_post_send(ch, iu, len)) {
2060
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2061 2062 2063
		goto err_unmap;
	}

2064 2065
	ret = 0;

2066 2067 2068 2069
unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

2070
	return ret;
2071 2072

err_unmap:
2073
	srp_unmap_data(scmnd, ch, req);
2074

2075
err_iu:
2076
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2077

2078 2079 2080 2081 2082 2083
	/*
	 * Avoid that the loops that iterate over the request ring can
	 * encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

2084 2085 2086 2087 2088 2089 2090
err:
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}
2091

2092
	goto unlock_rport;
2093 2094
}

2095 2096
/*
 * Note: the resources allocated in this function are freed in
2097
 * srp_free_ch_ib().
2098
 */
2099
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2100
{
2101
	struct srp_target_port *target = ch->target;
2102 2103
	int i;

2104 2105 2106
	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
2107
		goto err_no_ring;
2108 2109 2110
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
2111 2112 2113
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
2114 2115 2116 2117
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
2118 2119 2120
			goto err;
	}

2121
	for (i = 0; i < target->queue_size; ++i) {
2122 2123 2124 2125
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      target->max_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
2126
			goto err;
2127

2128
		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2129 2130 2131 2132 2133
	}

	return 0;

err:
2134
	for (i = 0; i < target->queue_size; ++i) {
2135 2136
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
2137 2138
	}

2139 2140

err_no_ring:
2141 2142 2143 2144
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;
2145

2146 2147 2148
	return -ENOMEM;
}

2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
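	 *
	 * For example (illustrative values): with qp_attr->timeout == 19 and
	 * qp_attr->retry_cnt == 7, T_tr = 4096 ns * 2^19 ~= 2.1 s, so an
	 * error completion can take up to 7 * 4 * 2.1 s ~= 60 s to appear
	 * and rq_tmo_jiffies corresponds to about 61 seconds.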
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}

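/*
 * Process a connection manager REP message: parse the SRP login response,
 * allocate the IU rings if this is the first login on this channel,
 * transition the QP through RTR and RTS, post the initial receive buffers
 * and acknowledge the connection with an RTU.
 */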
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       struct srp_login_rsp *lrsp,
2178
			       struct srp_rdma_ch *ch)
2179
{
2180
	struct srp_target_port *target = ch->target;
2181 2182 2183 2184 2185 2186
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
2187 2188
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2189 2190 2191 2192 2193 2194

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
2195
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2196
			      target->scsi_host->can_queue);
2197 2198 2199
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
2200 2201 2202 2203 2204 2205 2206
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

2207 2208
	if (!ch->rx_ring) {
		ret = srp_alloc_iu_bufs(ch);
2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

2223
	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2224 2225 2226
	if (ret)
		goto error_free;

2227
	for (i = 0; i < target->queue_size; i++) {
2228 2229 2230
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
2231 2232 2233 2234 2235 2236 2237 2238 2239
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

2240 2241
	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

2242
	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2243 2244 2245 2246 2247 2248 2249 2250 2251
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
2252
	ch->status = ret;
2253 2254
}

2255 2256
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
2257
			       struct srp_rdma_ch *ch)
2258
{
2259
	struct srp_target_port *target = ch->target;
2260
	struct Scsi_Host *shost = target->scsi_host;
2261 2262 2263 2264 2265 2266
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
2267 2268
		ch->path.dlid = cpi->redirect_lid;
		ch->path.pkey = cpi->redirect_pkey;
2269
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2270
		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2271

2272
		ch->status = ch->path.dlid ?
2273 2274 2275 2276
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
2277
		if (srp_target_is_topspin(target)) {
2278 2279 2280 2281 2282
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
2283
			memcpy(ch->path.dgid.raw,
2284 2285
			       event->param.rej_rcvd.ari, 16);

2286 2287
			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2288 2289
				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
				     be64_to_cpu(ch->path.dgid.global.interface_id));
2290

2291
			ch->status = SRP_PORT_REDIRECT;
2292
		} else {
2293 2294
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2295
			ch->status = -ECONNRESET;
2296 2297 2298 2299
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2300 2301
		shost_printk(KERN_WARNING, shost,
			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2302
		ch->status = -ECONNRESET;
2303 2304 2305 2306 2307 2308 2309 2310 2311
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2312 2313
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2314
			else
B
Bart Van Assche 已提交
2315 2316
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2317 2318
					     target->sgid.raw,
					     target->orig_dgid.raw, reason);
2319
		} else
2320 2321 2322
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
2323
		ch->status = -ECONNRESET;
2324 2325
		break;

D
David Dillow 已提交
2326 2327
	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2328
		ch->status = SRP_STALE_CONN;
D
David Dillow 已提交
2329 2330
		break;

2331
	default:
2332 2333
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
2334
		ch->status = -ECONNRESET;
2335 2336 2337 2338 2339
	}
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
2340 2341
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
2342 2343 2344 2345
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
2346 2347
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
2348
		comp = 1;
2349
		ch->status = -ECONNRESET;
2350 2351 2352 2353
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
2354
		srp_cm_rep_handler(cm_id, event->private_data, ch);
2355 2356 2357
		break;

	case IB_CM_REJ_RECEIVED:
2358
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2359 2360
		comp = 1;

2361
		srp_cm_rej_handler(cm_id, event, ch);
2362 2363
		break;

2364
	case IB_CM_DREQ_RECEIVED:
2365 2366
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
2367
		ch->connected = false;
2368
		if (ib_send_cm_drep(cm_id, NULL, 0))
2369 2370
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
2371
		queue_work(system_long_wq, &target->tl_err_work);
2372 2373 2374
		break;

	case IB_CM_TIMEWAIT_EXIT:
2375 2376
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
2377
		comp = 1;
2378

2379
		ch->status = 0;
2380 2381
		break;

2382 2383 2384 2385 2386
	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

2387
	default:
2388 2389
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
2390 2391 2392 2393
		break;
	}

	if (comp)
2394
		complete(&ch->done);
2395 2396 2397 2398

	return 0;
}

2399 2400 2401 2402 2403 2404 2405 2406
/**
 * srp_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns queue depth.
 */
static int
2407
srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2408
{
2409
	if (!sdev->tagged_supported)
2410
		qdepth = 1;
2411
	return scsi_change_queue_depth(sdev, qdepth);
2412 2413
}

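/**
 * srp_send_tsk_mgmt() - send an SRP task management request
 * @ch:      RDMA channel to send the request on.
 * @req_tag: Tag of the command the function applies to.
 * @lun:     SCSI logical unit number.
 * @func:    Task management function, e.g. SRP_TSK_ABORT_TASK or
 *           SRP_TSK_LUN_RESET.
 *
 * Returns 0 if a task management response was received within
 * SRP_ABORT_TIMEOUT_MS and -1 on any failure.
 */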
static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
			     u8 func)
2416
{
2417
	struct srp_target_port *target = ch->target;
2418
	struct srp_rport *rport = target->rport;
2419
	struct ib_device *dev = target->srp_host->srp_dev->dev;
2420 2421 2422
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

2423
	if (!ch->connected || target->qp_in_error)
2424 2425
		return -1;

2426
	init_completion(&ch->tsk_mgmt_done);
2427

2428
	/*
2429
	 * Lock the rport mutex to avoid that srp_create_ch_ib() is
2430 2431 2432
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
2433 2434 2435
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);
2436

2437 2438 2439
	if (!iu) {
		mutex_unlock(&rport->mutex);

2440
		return -1;
2441
	}
2442

2443 2444
	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
2445 2446 2447 2448
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
B
Bart Van Assche 已提交
2449
	int_to_scsilun(lun, &tsk_mgmt->lun);
2450
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
2451
	tsk_mgmt->tsk_mgmt_func = func;
2452
	tsk_mgmt->task_tag	= req_tag;
2453

2454 2455
	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
2456 2457
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2458 2459
		mutex_unlock(&rport->mutex);

2460 2461
		return -1;
	}
2462
	mutex_unlock(&rport->mutex);
2463

2464
	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2465
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2466
		return -1;
2467

2468 2469 2470
	return 0;
}

2471 2472
static int srp_abort(struct scsi_cmnd *scmnd)
{
2473
	struct srp_target_port *target = host_to_target(scmnd->device->host);
2474
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
B
Bart Van Assche 已提交
2475
	u32 tag;
B
Bart Van Assche 已提交
2476
	u16 ch_idx;
2477
	struct srp_rdma_ch *ch;
2478
	int ret;
2479

2480
	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2481

B
Bart Van Assche 已提交
2482
	if (!req)
2483
		return SUCCESS;
B
Bart Van Assche 已提交
2484
	tag = blk_mq_unique_tag(scmnd->request);
B
Bart Van Assche 已提交
2485 2486 2487 2488 2489 2490 2491 2492
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
B
Bart Van Assche 已提交
2493
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2494
			      SRP_TSK_ABORT_TASK) == 0)
2495
		ret = SUCCESS;
2496
	else if (target->rport->state == SRP_RPORT_LOST)
2497
		ret = FAST_IO_FAIL;
2498 2499
	else
		ret = FAILED;
2500
	srp_free_req(ch, req, scmnd, 0);
B
Bart Van Assche 已提交
2501
	scmnd->result = DID_ABORT << 16;
2502
	scmnd->scsi_done(scmnd);
2503

2504
	return ret;
2505 2506 2507 2508
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_rdma_ch *ch;
	int i, j;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	ch = &target->ch[0];
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (ch->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
		}
	}

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

2538
	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2539

2540
	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2541 2542
}

2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557
static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}

2558 2559
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
2560
{
2561
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2562

2563
	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2564 2565
}

2566 2567
static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
2568
{
2569
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2570

2571
	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2572 2573
}

2574 2575
static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
2576
{
2577
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2578

2579
	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
2580 2581
}

2582 2583
static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
2584
{
2585
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2586

2587
	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2588 2589
}

B
Bart Van Assche 已提交
2590 2591 2592 2593 2594
static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

2595
	return sprintf(buf, "%pI6\n", target->sgid.raw);
B
Bart Van Assche 已提交
2596 2597
}

2598 2599
static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
2600
{
2601
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
B
Bart Van Assche 已提交
2602
	struct srp_rdma_ch *ch = &target->ch[0];
2603

2604
	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2605 2606
}

2607 2608
static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
2609
{
2610
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2611

2612
	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2613 2614
}

2615 2616 2617 2618
static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
B
Bart Van Assche 已提交
2619 2620
	struct srp_rdma_ch *ch;
	int i, req_lim = INT_MAX;
2621

B
Bart Van Assche 已提交
2622 2623 2624 2625 2626
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		req_lim = min(req_lim, ch->req_lim);
	}
	return sprintf(buf, "%d\n", req_lim);
2627 2628
}

2629 2630
static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
2631
{
2632
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2633 2634 2635 2636

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

2637 2638
static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
2639
{
2640
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2641 2642 2643 2644

	return sprintf(buf, "%d\n", target->srp_host->port);
}

2645 2646
static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
2647
{
2648
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2649

2650
	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2651 2652
}

B
Bart Van Assche 已提交
2653 2654 2655 2656 2657 2658 2659 2660
static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->ch_count);
}

2661 2662 2663 2664 2665 2666 2667 2668
static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

2669 2670 2671 2672 2673 2674 2675 2676
static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

2677 2678 2679 2680 2681 2682 2683 2684
static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

2685 2686 2687 2688 2689 2690 2691 2692
static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

2693 2694 2695 2696
static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
B
Bart Van Assche 已提交
2697
static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
2698 2699
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
2700
static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
2701 2702 2703
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
B
Bart Van Assche 已提交
2704
static DEVICE_ATTR(ch_count,        S_IRUGO, show_ch_count,        NULL);
2705
static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
2706
static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
2707
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
2708
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
2709 2710 2711 2712 2713 2714

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
B
Bart Van Assche 已提交
2715
	&dev_attr_sgid,
2716 2717
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
2718
	&dev_attr_req_lim,
2719 2720 2721
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
B
Bart Van Assche 已提交
2722
	&dev_attr_ch_count,
2723
	&dev_attr_comp_vector,
2724
	&dev_attr_tl_retry_count,
2725
	&dev_attr_cmd_sg_entries,
2726
	&dev_attr_allow_ext_sg,
2727 2728 2729
	NULL
};

2730 2731
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
R
Roland Dreier 已提交
2732 2733
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
2734
	.slave_configure		= srp_slave_configure,
2735 2736
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
2737
	.change_queue_depth             = srp_change_queue_depth,
2738 2739 2740
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
B
Bart Van Assche 已提交
2741
	.skip_settle_delay		= true,
2742
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
2743
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
2744
	.this_id			= -1,
2745
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
2746
	.use_clustering			= ENABLE_CLUSTERING,
B
Bart Van Assche 已提交
2747 2748
	.shost_attrs			= srp_host_attrs,
	.use_blk_tags			= 1,
2749
	.track_queue_depth		= 1,
2750 2751
};

2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762
static int srp_sdev_count(struct Scsi_Host *host)
{
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
}

2763 2764
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
2765 2766 2767
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

2768
	target->state = SRP_TARGET_SCANNING;
2769
	sprintf(target->target_name, "SRP.T10:%016llX",
2770
		be64_to_cpu(target->id_ext));
2771

2772
	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2773 2774
		return -ENODEV;

2775 2776
	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2777
	ids.roles = SRP_RPORT_ROLE_TARGET;
2778 2779 2780 2781 2782 2783
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

2784
	rport->lld_data = target;
2785
	target->rport = rport;
2786

2787
	spin_lock(&host->target_lock);
2788
	list_add_tail(&target->list, &host->target_list);
2789
	spin_unlock(&host->target_lock);
2790 2791

	scsi_scan_target(&target->scsi_host->shost_gendev,
2792
			 0, target->scsi_id, SCAN_WILD_CARD, 0);
2793

2794 2795
	if (srp_connected_ch(target) < target->ch_count ||
	    target->qp_in_error) {
2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
2812 2813 2814
	return 0;
}

2815
static void srp_release_dev(struct device *dev)
2816 2817
{
	struct srp_host *host =
2818
		container_of(dev, struct srp_host, dev);
2819 2820 2821 2822 2823 2824

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
2825
	.dev_release = srp_release_dev
2826 2827
};

2828 2829
/**
 * srp_conn_unique() - check whether the connection to a target is unique
2830 2831
 * @host:   SRP host.
 * @target: SRP target port.
2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}

2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875
/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
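 *
 * Example (illustrative values; the sysfs directory name follows the
 * "srp-<hca name>-<port>" pattern used by srp_add_port()):
 *
 *     echo "id_ext=200100e08b000000,ioc_guid=00117500000000aa,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd5" > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target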
 */
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
2876
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
2877
	SRP_OPT_IO_CLASS	= 1 << 7,
2878
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
2879
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
2880 2881
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
2882
	SRP_OPT_COMP_VECTOR	= 1 << 12,
2883
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
2884
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
2885 2886 2887 2888 2889 2890 2891
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

2892
static const match_table_t srp_opt_tokens = {
2893 2894 2895 2896 2897 2898 2899
	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
	{ SRP_OPT_DGID,			"dgid=%s" 		},
	{ SRP_OPT_PKEY,			"pkey=%x" 		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
2900
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
2901
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
2902
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
2903 2904
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
2905
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
2906
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
2907
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
2908
	{ SRP_OPT_ERR,			NULL 			}
2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
2927
	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2928 2929 2930 2931 2932 2933 2934 2935 2936
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
2937 2938 2939 2940
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
2941 2942 2943 2944 2945 2946
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
2947 2948 2949 2950
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
2951 2952 2953 2954 2955 2956
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
2957 2958 2959 2960
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
2961
			if (strlen(p) != 32) {
2962
				pr_warn("bad dest GID parameter '%s'\n", p);
2963
				kfree(p);
2964 2965 2966 2967
				goto out;
			}

			for (i = 0; i < 16; ++i) {
2968 2969 2970 2971 2972 2973 2974
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
2975
			}
2976
			kfree(p);
2977 2978 2979 2980
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
2981
				pr_warn("bad P_Key parameter '%s'\n", p);
2982 2983
				goto out;
			}
2984
			target->pkey = cpu_to_be16(token);
2985 2986 2987 2988
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
2989 2990 2991 2992
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
2993 2994 2995 2996 2997 2998
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
2999
				pr_warn("bad max sect parameter '%s'\n", p);
3000 3001 3002 3003 3004
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016
		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

3017
		case SRP_OPT_MAX_CMD_PER_LUN:
3018
			if (match_int(args, &token) || token < 1) {
3019 3020
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
3021 3022
				goto out;
			}
3023
			target->scsi_host->cmd_per_lun = token;
3024 3025
			break;

3026 3027
		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
3028
				pr_warn("bad IO class parameter '%s'\n", p);
3029 3030 3031 3032
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
3033 3034 3035
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
3036 3037 3038 3039 3040
				goto out;
			}
			target->io_class = token;
			break;

3041 3042
		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
3043 3044 3045 3046
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
3047 3048 3049 3050
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

3051 3052
		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
3053 3054
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
3055 3056 3057 3058 3059
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

3060 3061
		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
3062
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3063 3064 3065 3066 3067 3068 3069 3070
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3071 3072
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
3073 3074 3075 3076 3077
				goto out;
			}
			target->sg_tablesize = token;
			break;

3078 3079 3080 3081 3082 3083 3084 3085
		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

3086 3087 3088 3089 3090 3091 3092 3093 3094
		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

3095
		default:
3096 3097
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
3098 3099 3100 3101 3102 3103 3104 3105 3106 3107
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
3108 3109
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);
3110

3111 3112 3113 3114 3115 3116
	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

3117 3118 3119 3120 3121
out:
	kfree(options);
	return ret;
}

3122 3123
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
3124 3125 3126
				 const char *buf, size_t count)
{
	struct srp_host *host =
3127
		container_of(dev, struct srp_host, dev);
3128 3129
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
3130
	struct srp_rdma_ch *ch;
3131 3132
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
B
Bart Van Assche 已提交
3133 3134
	int ret, node_idx, node, cpu, i;
	bool multich = false;
3135 3136 3137 3138 3139 3140

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

3141
	target_host->transportt  = ib_srp_transport_template;
3142 3143
	target_host->max_channel = 0;
	target_host->max_id      = 1;
B
Bart Van Assche 已提交
3144
	target_host->max_lun     = -1LL;
A
Arne Redlich 已提交
3145
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
R
Roland Dreier 已提交
3146

3147 3148
	target = host_to_target(target_host);

3149 3150 3151 3152 3153 3154
	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
3155 3156
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
3157
	target->tl_retry_count	= 7;
3158
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;
3159

3160 3161 3162 3163 3164 3165
	/*
	 * Avoid that the SCSI host can be removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

3166 3167
	mutex_lock(&host->add_target_mutex);

3168 3169
	ret = srp_parse_options(buf, target);
	if (ret)
3170
		goto out;
3171

B
Bart Van Assche 已提交
3172 3173
	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
	if (ret)
3174
		goto out;
B
Bart Van Assche 已提交
3175

3176 3177
	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

3178 3179 3180 3181 3182 3183 3184
	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
3185
		goto out;
3186 3187
	}

3188
	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3189
	    target->cmd_sg_cnt < target->sg_tablesize) {
3190
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3191 3192 3193 3194 3195 3196
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
3197 3198 3199 3200
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

3201
	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3202
	INIT_WORK(&target->remove_work, srp_remove_work);
3203
	spin_lock_init(&target->lock);
3204
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
3205
	if (ret)
3206
		goto out;
3207

B
Bart Van Assche 已提交
3208 3209 3210 3211 3212 3213 3214 3215 3216
	ret = -ENOMEM;
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ? :
				     min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
3217
		goto out;
3218

B
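	/*
	 * Spread the RDMA channels evenly over the online NUMA nodes and,
	 * within each node, over that node's online CPUs. The device's
	 * completion vectors are divided over the nodes in the same way so
	 * that completions are handled close to the CPUs submitting the I/O.
	 */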
	for_each_online_node(node) {
		const int ch_start = (node_idx * target->ch_count /
				      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
		const int cv_start = (node_idx * ibdev->num_comp_vectors /
				      num_online_nodes() + target->comp_vector)
				     % ibdev->num_comp_vectors;
		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
				    num_online_nodes() + target->comp_vector)
				   % ibdev->num_comp_vectors;
		int cpu_idx = 0;

		for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				continue;
			ch = &target->ch[ch_start + cpu_idx];
			ch->target = target;
			ch->comp_vector = cv_start == cv_end ? cv_start :
				cv_start + cpu_idx % (cv_end - cv_start);
			spin_lock_init(&ch->lock);
			INIT_LIST_HEAD(&ch->free_tx);
			ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;
3247

B
Bart Van Assche 已提交
3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275
			ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_connect_ch(ch, multich);
			if (ret) {
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d failed\n",
					     ch_start + cpu_idx,
					     target->ch_count);
				if (node_idx == 0 && cpu_idx == 0) {
					goto err_disconnect;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					break;
				}
			}

			multich = true;
			cpu_idx++;
		}
		node_idx++;
3276 3277
	}

B
Bart Van Assche 已提交
3278 3279
	target->scsi_host->nr_hw_queues = target->ch_count;

3280 3281 3282 3283
	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

3284 3285 3286 3287 3288
	if (target->state != SRP_TARGET_REMOVED) {
		shost_printk(KERN_DEBUG, target->scsi_host, PFX
			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
3289
			     be16_to_cpu(target->pkey),
3290
			     be64_to_cpu(target->service_id),
3291
			     target->sgid.raw, target->orig_dgid.raw);
3292
	}
B
Bart Van Assche 已提交
3293

3294 3295 3296 3297
	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);
3298 3299 3300

	scsi_host_put(target->scsi_host);

3301
	return ret;
3302 3303 3304 3305

err_disconnect:
	srp_disconnect_target(target);

B
Bart Van Assche 已提交
3306 3307 3308 3309 3310
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}
3311

B
Bart Van Assche 已提交
3312
	kfree(target->ch);
3313
	goto out;
3314 3315
}

3316
static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3317

3318 3319
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
3320
{
3321
	struct srp_host *host = container_of(dev, struct srp_host, dev);
3322

3323
	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3324 3325
}

3326
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3327

3328 3329
static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
3330
{
3331
	struct srp_host *host = container_of(dev, struct srp_host, dev);
3332 3333 3334 3335

	return sprintf(buf, "%d\n", host->port);
}

3336
static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3337

3338
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3339 3340 3341 3342 3343 3344 3345 3346
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
3347
	spin_lock_init(&host->target_lock);
3348
	init_completion(&host->released);
3349
	mutex_init(&host->add_target_mutex);
3350
	host->srp_dev = device;
3351 3352
	host->port = port;

3353 3354
	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
3355
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3356

3357
	if (device_register(&host->dev))
3358
		goto free_host;
3359
	if (device_create_file(&host->dev, &dev_attr_add_target))
3360
		goto err_class;
3361
	if (device_create_file(&host->dev, &dev_attr_ibdev))
3362
		goto err_class;
3363
	if (device_create_file(&host->dev, &dev_attr_port))
3364 3365 3366 3367 3368
		goto err_class;

	return host;

err_class:
3369
	device_unregister(&host->dev);
3370

3371
free_host:
3372 3373 3374 3375 3376 3377 3378
	kfree(host);

	return NULL;
}

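/*
 * srp_add_one() - per-HCA initialization
 *
 * Query the device capabilities, choose between FMR and fast registration
 * for memory registration, compute the memory registration limits, allocate
 * a protection domain plus a DMA memory region, and register one SRP host
 * per port.
 */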
static void srp_add_one(struct ib_device *device)
{
3379 3380
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
3381
	struct srp_host *host;
3382
	int mr_page_shift, p;
3383
	u64 max_pages_per_mr;
3384

3385 3386
	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
3387
		return;
3388

3389
	if (ib_query_device(device, dev_attr)) {
3390
		pr_warn("Query device failed for %s\n", device->name);
3391 3392 3393 3394 3395 3396 3397
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

3398 3399
	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
3400 3401 3402 3403 3404 3405 3406
	srp_dev->has_fr = (dev_attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr)
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");

	srp_dev->use_fast_reg = (srp_dev->has_fr &&
				 (!srp_dev->has_fmr || prefer_fr));
3407

3408 3409
	/*
	 * Use the smallest page size supported by the HCA, down to a
3410 3411
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
3412
	 */
3413 3414 3415 3416 3417 3418 3419
	mr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr	= dev_attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
3420 3421 3422 3423 3424
	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      dev_attr->max_fast_reg_page_list_len);
	}
3425 3426
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				   srp_dev->max_pages_per_mr;
3427
	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3428
		 device->name, mr_page_shift, dev_attr->max_mr_size,
3429
		 dev_attr->max_fast_reg_page_list_len,
3430
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

3446
	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3447
		host = srp_add_port(srp_dev, p);
3448
		if (host)
3449
			list_add_tail(&host->list, &srp_dev->dev_list);
3450 3451
	}

3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463
	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
3464 3465 3466 3467
}

static void srp_remove_one(struct ib_device *device)
{
3468
	struct srp_device *srp_dev;
3469
	struct srp_host *host, *tmp_host;
3470
	struct srp_target_port *target;
3471

3472
	srp_dev = ib_get_client_data(device, &srp_client);
3473 3474
	if (!srp_dev)
		return;
3475

3476
	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3477
		device_unregister(&host->dev);
3478 3479 3480 3481 3482 3483 3484
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
3485
		 * Remove all target ports.
3486
		 */
3487
		spin_lock(&host->target_lock);
3488 3489
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
3490
		spin_unlock(&host->target_lock);
3491 3492

		/*
3493
		 * Wait for tl_err and target port removal tasks.
3494
		 */
3495
		flush_workqueue(system_long_wq);
3496
		flush_workqueue(srp_remove_wq);
3497 3498 3499 3500

		kfree(host);
	}

3501 3502 3503 3504
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
3505 3506
}

3507
static struct srp_function_template ib_srp_transport_functions = {
3508 3509
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
3510
	.reconnect_delay	 = &srp_reconnect_delay,
3511 3512 3513
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
3514
	.rport_delete		 = srp_rport_delete,
3515
	.terminate_rport_io	 = srp_terminate_io,
3516 3517
};

3518 3519 3520 3521
static int __init srp_init_module(void)
{
	int ret;

3522
	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
3523

3524
	if (srp_sg_tablesize) {
3525
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3526 3527 3528 3529 3530 3531 3532 3533
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
3534
		pr_warn("Clamping cmd_sg_entries to 255\n");
3535
		cmd_sg_entries = 255;
3536 3537
	}

3538 3539 3540
	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
3541 3542
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
3543 3544 3545
		indirect_sg_entries = cmd_sg_entries;
	}

3546
	srp_remove_wq = create_workqueue("srp_remove");
3547 3548
	if (!srp_remove_wq) {
		ret = -ENOMEM;
3549 3550 3551 3552
		goto out;
	}

	ret = -ENOMEM;
3553 3554 3555
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
3556
		goto destroy_wq;
3557

3558 3559
	ret = class_register(&srp_class);
	if (ret) {
3560
		pr_err("couldn't register class infiniband_srp\n");
3561
		goto release_tr;
3562 3563
	}

3564 3565
	ib_sa_register_client(&srp_sa_client);

3566 3567
	ret = ib_register_client(&srp_client);
	if (ret) {
3568
		pr_err("couldn't register IB client\n");
3569
		goto unreg_sa;
3570 3571
	}

3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584
out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
3585 3586 3587 3588 3589
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
3590
	ib_sa_unregister_client(&srp_sa_client);
3591
	class_unregister(&srp_class);
3592
	srp_release_transport(ib_srp_transport_template);
3593
	destroy_workqueue(srp_remove_wq);
3594 3595 3596 3597
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);